[qemu/qemu_0_9_1_stable.git] / linux-user / signal.c
1 /*
2 * Emulation of Linux signals
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <string.h>
23 #include <stdarg.h>
24 #include <unistd.h>
25 #include <signal.h>
26 #include <errno.h>
27 #include <sys/ucontext.h>
29 #include "qemu.h"
31 //#define DEBUG_SIGNAL
33 #define MAX_SIGQUEUE_SIZE 1024
35 struct sigqueue {
36 struct sigqueue *next;
37 target_siginfo_t info;
40 struct emulated_sigaction {
41 struct target_sigaction sa;
42 int pending; /* true if signal is pending */
43 struct sigqueue *first;
44 struct sigqueue info; /* in order to always have memory for the
45 first signal, we put it here */
48 static struct emulated_sigaction sigact_table[TARGET_NSIG];
49 static struct sigqueue sigqueue_table[MAX_SIGQUEUE_SIZE]; /* siginfo queue */
50 static struct sigqueue *first_free; /* first free siginfo queue entry */
51 static int signal_pending; /* non zero if a signal may be pending */
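/* Overview of the emulated signal path (descriptive note): host_signal_handler()
   converts the host siginfo into target format and calls queue_signal();
   queue_signal() records the signal in the per-signal queue and sets
   signal_pending; the handler then requests CPU_INTERRUPT_EXIT so that the
   execution loop returns and process_pending_signals() can build the guest
   signal frame via setup_frame()/setup_rt_frame() before more guest code runs. */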
53 static void host_signal_handler(int host_signum, siginfo_t *info,
54 void *puc);
56 static uint8_t host_to_target_signal_table[65] = {
57 [SIGHUP] = TARGET_SIGHUP,
58 [SIGINT] = TARGET_SIGINT,
59 [SIGQUIT] = TARGET_SIGQUIT,
60 [SIGILL] = TARGET_SIGILL,
61 [SIGTRAP] = TARGET_SIGTRAP,
62 [SIGABRT] = TARGET_SIGABRT,
63 /* [SIGIOT] = TARGET_SIGIOT,*/
64 [SIGBUS] = TARGET_SIGBUS,
65 [SIGFPE] = TARGET_SIGFPE,
66 [SIGKILL] = TARGET_SIGKILL,
67 [SIGUSR1] = TARGET_SIGUSR1,
68 [SIGSEGV] = TARGET_SIGSEGV,
69 [SIGUSR2] = TARGET_SIGUSR2,
70 [SIGPIPE] = TARGET_SIGPIPE,
71 [SIGALRM] = TARGET_SIGALRM,
72 [SIGTERM] = TARGET_SIGTERM,
73 #ifdef SIGSTKFLT
74 [SIGSTKFLT] = TARGET_SIGSTKFLT,
75 #endif
76 [SIGCHLD] = TARGET_SIGCHLD,
77 [SIGCONT] = TARGET_SIGCONT,
78 [SIGSTOP] = TARGET_SIGSTOP,
79 [SIGTSTP] = TARGET_SIGTSTP,
80 [SIGTTIN] = TARGET_SIGTTIN,
81 [SIGTTOU] = TARGET_SIGTTOU,
82 [SIGURG] = TARGET_SIGURG,
83 [SIGXCPU] = TARGET_SIGXCPU,
84 [SIGXFSZ] = TARGET_SIGXFSZ,
85 [SIGVTALRM] = TARGET_SIGVTALRM,
86 [SIGPROF] = TARGET_SIGPROF,
87 [SIGWINCH] = TARGET_SIGWINCH,
88 [SIGIO] = TARGET_SIGIO,
89 [SIGPWR] = TARGET_SIGPWR,
90 [SIGSYS] = TARGET_SIGSYS,
91 /* next signals stay the same */
93 static uint8_t target_to_host_signal_table[65];
95 static inline int host_to_target_signal(int sig)
97 return host_to_target_signal_table[sig];
100 static inline int target_to_host_signal(int sig)
102 return target_to_host_signal_table[sig];
105 static void host_to_target_sigset_internal(target_sigset_t *d,
106 const sigset_t *s)
108 int i;
109 unsigned long sigmask;
110 uint32_t target_sigmask;
112 sigmask = ((unsigned long *)s)[0];
113 target_sigmask = 0;
114 for(i = 0; i < 32; i++) {
115 if (sigmask & (1 << i))
116 target_sigmask |= 1 << (host_to_target_signal(i + 1) - 1);
118 #if TARGET_LONG_BITS == 32 && HOST_LONG_BITS == 32
119 d->sig[0] = target_sigmask;
120 for(i = 1;i < TARGET_NSIG_WORDS; i++) {
121 d->sig[i] = ((unsigned long *)s)[i];
123 #elif TARGET_LONG_BITS == 32 && HOST_LONG_BITS == 64 && TARGET_NSIG_WORDS == 2
124 d->sig[0] = target_sigmask;
125 d->sig[1] = sigmask >> 32;
126 #else
127 #warning host_to_target_sigset
128 #endif
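/* Note: only the first 32 signal numbers can differ between host and target
   (signal_init() fills the translation tables with the identity mapping for
   everything else), so the conversion above only remaps the bits of the first
   word and copies the remaining words unchanged. */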
131 void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
133 target_sigset_t d1;
134 int i;
136 host_to_target_sigset_internal(&d1, s);
137 for(i = 0;i < TARGET_NSIG_WORDS; i++)
138 d->sig[i] = tswapl(d1.sig[i]);
141 void target_to_host_sigset_internal(sigset_t *d, const target_sigset_t *s)
143 int i;
144 unsigned long sigmask;
145 target_ulong target_sigmask;
147 target_sigmask = s->sig[0];
148 sigmask = 0;
149 for(i = 0; i < 32; i++) {
150 if (target_sigmask & (1 << i))
151 sigmask |= 1 << (target_to_host_signal(i + 1) - 1);
153 #if TARGET_LONG_BITS == 32 && HOST_LONG_BITS == 32
154 ((unsigned long *)d)[0] = sigmask;
155 for(i = 1;i < TARGET_NSIG_WORDS; i++) {
156 ((unsigned long *)d)[i] = s->sig[i];
158 #elif TARGET_LONG_BITS == 32 && HOST_LONG_BITS == 64 && TARGET_NSIG_WORDS == 2
159 ((unsigned long *)d)[0] = sigmask | ((unsigned long)(s->sig[1]) << 32);
160 #else
161 #warning target_to_host_sigset
162 #endif /* TARGET_LONG_BITS */
165 void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
167 target_sigset_t s1;
168 int i;
170 for(i = 0;i < TARGET_NSIG_WORDS; i++)
171 s1.sig[i] = tswapl(s->sig[i]);
172 target_to_host_sigset_internal(d, &s1);
175 void host_to_target_old_sigset(target_ulong *old_sigset,
176 const sigset_t *sigset)
178 target_sigset_t d;
179 host_to_target_sigset(&d, sigset);
180 *old_sigset = d.sig[0];
183 void target_to_host_old_sigset(sigset_t *sigset,
184 const target_ulong *old_sigset)
186 target_sigset_t d;
187 int i;
189 d.sig[0] = *old_sigset;
190 for(i = 1;i < TARGET_NSIG_WORDS; i++)
191 d.sig[i] = 0;
192 target_to_host_sigset(sigset, &d);
195 /* siginfo conversion */
197 static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
198 const siginfo_t *info)
200 int sig;
201 sig = host_to_target_signal(info->si_signo);
202 tinfo->si_signo = sig;
203 tinfo->si_errno = 0;
204 tinfo->si_code = 0;
205 if (sig == SIGILL || sig == SIGFPE || sig == SIGSEGV ||
206 sig == SIGBUS || sig == SIGTRAP) {
207 /* should never come here, but who knows. The information for
208 the target is irrelevant */
209 tinfo->_sifields._sigfault._addr = 0;
210 } else if (sig == SIGIO) {
211 tinfo->_sifields._sigpoll._fd = info->si_fd;
212 } else if (sig >= TARGET_SIGRTMIN) {
213 tinfo->_sifields._rt._pid = info->si_pid;
214 tinfo->_sifields._rt._uid = info->si_uid;
215 /* XXX: potential problem if 64 bit */
216 tinfo->_sifields._rt._sigval.sival_ptr =
217 (target_ulong)info->si_value.sival_ptr;
221 static void tswap_siginfo(target_siginfo_t *tinfo,
222 const target_siginfo_t *info)
224 int sig;
225 sig = info->si_signo;
226 tinfo->si_signo = tswap32(sig);
227 tinfo->si_errno = tswap32(info->si_errno);
228 tinfo->si_code = tswap32(info->si_code);
229 if (sig == SIGILL || sig == SIGFPE || sig == SIGSEGV ||
230 sig == SIGBUS || sig == SIGTRAP) {
231 tinfo->_sifields._sigfault._addr =
232 tswapl(info->_sifields._sigfault._addr);
233 } else if (sig == SIGIO) {
234 tinfo->_sifields._sigpoll._fd = tswap32(info->_sifields._sigpoll._fd);
235 } else if (sig >= TARGET_SIGRTMIN) {
236 tinfo->_sifields._rt._pid = tswap32(info->_sifields._rt._pid);
237 tinfo->_sifields._rt._uid = tswap32(info->_sifields._rt._uid);
238 tinfo->_sifields._rt._sigval.sival_ptr =
239 tswapl(info->_sifields._rt._sigval.sival_ptr);
244 void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
246 host_to_target_siginfo_noswap(tinfo, info);
247 tswap_siginfo(tinfo, tinfo);
250 /* XXX: we only support the case where POSIX RT signals are used. */
251 /* XXX: find a solution for 64 bit (additional malloced data is needed) */
252 void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
254 info->si_signo = tswap32(tinfo->si_signo);
255 info->si_errno = tswap32(tinfo->si_errno);
256 info->si_code = tswap32(tinfo->si_code);
257 info->si_pid = tswap32(tinfo->_sifields._rt._pid);
258 info->si_uid = tswap32(tinfo->_sifields._rt._uid);
259 info->si_value.sival_ptr =
260 (void *)tswapl(tinfo->_sifields._rt._sigval.sival_ptr);
263 void signal_init(void)
265 struct sigaction act;
266 int i, j;
268 /* generate signal conversion tables */
269 for(i = 1; i <= 64; i++) {
270 if (host_to_target_signal_table[i] == 0)
271 host_to_target_signal_table[i] = i;
273 for(i = 1; i <= 64; i++) {
274 j = host_to_target_signal_table[i];
275 target_to_host_signal_table[j] = i;
278 /* set all host signal handlers. ALL signals are blocked during
279 the handlers to serialize them. */
280 sigfillset(&act.sa_mask);
281 act.sa_flags = SA_SIGINFO;
282 act.sa_sigaction = host_signal_handler;
283 for(i = 1; i < NSIG; i++) {
284 sigaction(i, &act, NULL);
287 memset(sigact_table, 0, sizeof(sigact_table));
289 first_free = &sigqueue_table[0];
290 for(i = 0; i < MAX_SIGQUEUE_SIZE - 1; i++)
291 sigqueue_table[i].next = &sigqueue_table[i + 1];
292 sigqueue_table[MAX_SIGQUEUE_SIZE - 1].next = NULL;
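/* The sigqueue entries form a simple singly linked free list headed by
   first_free, so alloc_sigqueue()/free_sigqueue() below are O(1).  In addition,
   each emulated_sigaction embeds one sigqueue ("info"), which guarantees that
   at least one slot is always available per signal. */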
295 /* signal queue handling */
297 static inline struct sigqueue *alloc_sigqueue(void)
299 struct sigqueue *q = first_free;
300 if (!q)
301 return NULL;
302 first_free = q->next;
303 return q;
306 static inline void free_sigqueue(struct sigqueue *q)
308 q->next = first_free;
309 first_free = q;
312 /* abort execution with signal */
313 void __attribute((noreturn)) force_sig(int sig)
315 int host_sig;
316 host_sig = target_to_host_signal(sig);
317 fprintf(stderr, "qemu: uncaught target signal %d (%s) - exiting\n",
318 sig, strsignal(host_sig));
319 #if 1
320 _exit(-host_sig);
321 #else
323 struct sigaction act;
324 sigemptyset(&act.sa_mask);
325 act.sa_flags = SA_SIGINFO;
326 act.sa_sigaction = SIG_DFL;
327 sigaction(SIGABRT, &act, NULL);
328 abort();
330 #endif
333 /* queue a signal so that it will be sent to the virtual CPU as soon
334 as possible */
335 int queue_signal(int sig, target_siginfo_t *info)
337 struct emulated_sigaction *k;
338 struct sigqueue *q, **pq;
339 target_ulong handler;
341 #if defined(DEBUG_SIGNAL)
342 fprintf(stderr, "queue_signal: sig=%d\n",
343 sig);
344 #endif
345 k = &sigact_table[sig - 1];
346 handler = k->sa._sa_handler;
347 if (handler == TARGET_SIG_DFL) {
348 /* default handler: ignore some signals, the others are fatal */
349 if (sig != TARGET_SIGCHLD &&
350 sig != TARGET_SIGURG &&
351 sig != TARGET_SIGWINCH) {
352 force_sig(sig);
353 } else {
354 return 0; /* indicate ignored */
356 } else if (handler == TARGET_SIG_IGN) {
357 /* ignore signal */
358 return 0;
359 } else if (handler == TARGET_SIG_ERR) {
360 force_sig(sig);
361 } else {
362 pq = &k->first;
363 if (sig < TARGET_SIGRTMIN) {
364 /* if non real time signal, we queue exactly one signal */
365 if (!k->pending)
366 q = &k->info;
367 else
368 return 0;
369 } else {
370 if (!k->pending) {
371 /* first signal */
372 q = &k->info;
373 } else {
374 q = alloc_sigqueue();
375 if (!q)
376 return -EAGAIN;
377 while (*pq != NULL)
378 pq = &(*pq)->next;
381 *pq = q;
382 q->info = *info;
383 q->next = NULL;
384 k->pending = 1;
385 /* signal that a new signal is pending */
386 signal_pending = 1;
387 return 1; /* indicates that the signal was queued */
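/* Return convention of queue_signal(): 0 means the signal was ignored, 1 means
   it was queued (the caller should interrupt the virtual CPU), -EAGAIN means
   the dynamic queue is exhausted; fatal default dispositions never return
   because force_sig() exits. */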
391 static void host_signal_handler(int host_signum, siginfo_t *info,
392 void *puc)
394 int sig;
395 target_siginfo_t tinfo;
397 /* the CPU emulator uses some host signals to detect exceptions,
398 so we forward those signals to it */
399 if (host_signum == SIGSEGV || host_signum == SIGBUS
400 #if defined(TARGET_I386) && defined(USE_CODE_COPY)
401 || host_signum == SIGFPE
402 #endif
404 if (cpu_signal_handler(host_signum, info, puc))
405 return;
408 /* get target signal number */
409 sig = host_to_target_signal(host_signum);
410 if (sig < 1 || sig > TARGET_NSIG)
411 return;
412 #if defined(DEBUG_SIGNAL)
413 fprintf(stderr, "qemu: got signal %d\n", sig);
414 #endif
415 host_to_target_siginfo_noswap(&tinfo, info);
416 if (queue_signal(sig, &tinfo) == 1) {
417 /* interrupt the virtual CPU as soon as possible */
418 cpu_interrupt(global_env, CPU_INTERRUPT_EXIT);
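/* CPU_INTERRUPT_EXIT asks the translated-code loop to stop as soon as
   possible, so that the pending signal can be delivered through
   process_pending_signals() before more guest code runs. */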
422 int do_sigaction(int sig, const struct target_sigaction *act,
423 struct target_sigaction *oact)
425 struct emulated_sigaction *k;
426 struct sigaction act1;
427 int host_sig;
429 if (sig < 1 || sig > TARGET_NSIG || sig == SIGKILL || sig == SIGSTOP)
430 return -EINVAL;
431 k = &sigact_table[sig - 1];
432 #if defined(DEBUG_SIGNAL)
433 fprintf(stderr, "sigaction sig=%d act=0x%08x, oact=0x%08x\n",
434 sig, (int)act, (int)oact);
435 #endif
436 if (oact) {
437 oact->_sa_handler = tswapl(k->sa._sa_handler);
438 oact->sa_flags = tswapl(k->sa.sa_flags);
439 #if !defined(TARGET_MIPS)
440 oact->sa_restorer = tswapl(k->sa.sa_restorer);
441 #endif
442 oact->sa_mask = k->sa.sa_mask;
444 if (act) {
445 k->sa._sa_handler = tswapl(act->_sa_handler);
446 k->sa.sa_flags = tswapl(act->sa_flags);
447 #if !defined(TARGET_MIPS)
448 k->sa.sa_restorer = tswapl(act->sa_restorer);
449 #endif
450 k->sa.sa_mask = act->sa_mask;
452 /* we update the host linux signal state */
453 host_sig = target_to_host_signal(sig);
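/* SIGSEGV and SIGBUS are skipped below on purpose: signal_init() installed
   host_signal_handler() for them and the CPU emulator relies on those host
   handlers (see cpu_signal_handler() above) to catch its own faults, so the
   guest disposition must not replace them on the host side. */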
454 if (host_sig != SIGSEGV && host_sig != SIGBUS) {
455 sigfillset(&act1.sa_mask);
456 act1.sa_flags = SA_SIGINFO;
457 if (k->sa.sa_flags & TARGET_SA_RESTART)
458 act1.sa_flags |= SA_RESTART;
459 /* NOTE: it is important to update the host kernel signal
460 ignore state to avoid getting unexpectedly interrupted
461 syscalls */
462 if (k->sa._sa_handler == TARGET_SIG_IGN) {
463 act1.sa_sigaction = (void *)SIG_IGN;
464 } else if (k->sa._sa_handler == TARGET_SIG_DFL) {
465 act1.sa_sigaction = (void *)SIG_DFL;
466 } else {
467 act1.sa_sigaction = host_signal_handler;
469 sigaction(host_sig, &act1, NULL);
472 return 0;
475 #ifndef offsetof
476 #define offsetof(type, field) ((size_t) &((type *)0)->field)
477 #endif
479 static inline int copy_siginfo_to_user(target_siginfo_t *tinfo,
480 const target_siginfo_t *info)
482 tswap_siginfo(tinfo, info);
483 return 0;
486 #ifdef TARGET_I386
488 /* from the Linux kernel */
490 struct target_fpreg {
491 uint16_t significand[4];
492 uint16_t exponent;
495 struct target_fpxreg {
496 uint16_t significand[4];
497 uint16_t exponent;
498 uint16_t padding[3];
501 struct target_xmmreg {
502 target_ulong element[4];
505 struct target_fpstate {
506 /* Regular FPU environment */
507 target_ulong cw;
508 target_ulong sw;
509 target_ulong tag;
510 target_ulong ipoff;
511 target_ulong cssel;
512 target_ulong dataoff;
513 target_ulong datasel;
514 struct target_fpreg _st[8];
515 uint16_t status;
516 uint16_t magic; /* 0xffff = regular FPU data only */
518 /* FXSR FPU environment */
519 target_ulong _fxsr_env[6]; /* FXSR FPU env is ignored */
520 target_ulong mxcsr;
521 target_ulong reserved;
522 struct target_fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */
523 struct target_xmmreg _xmm[8];
524 target_ulong padding[56];
527 #define X86_FXSR_MAGIC 0x0000
529 struct target_sigcontext {
530 uint16_t gs, __gsh;
531 uint16_t fs, __fsh;
532 uint16_t es, __esh;
533 uint16_t ds, __dsh;
534 target_ulong edi;
535 target_ulong esi;
536 target_ulong ebp;
537 target_ulong esp;
538 target_ulong ebx;
539 target_ulong edx;
540 target_ulong ecx;
541 target_ulong eax;
542 target_ulong trapno;
543 target_ulong err;
544 target_ulong eip;
545 uint16_t cs, __csh;
546 target_ulong eflags;
547 target_ulong esp_at_signal;
548 uint16_t ss, __ssh;
549 target_ulong fpstate; /* pointer */
550 target_ulong oldmask;
551 target_ulong cr2;
554 typedef struct target_sigaltstack {
555 target_ulong ss_sp;
556 int ss_flags;
557 target_ulong ss_size;
558 } target_stack_t;
560 struct target_ucontext {
561 target_ulong tuc_flags;
562 target_ulong tuc_link;
563 target_stack_t tuc_stack;
564 struct target_sigcontext tuc_mcontext;
565 target_sigset_t tuc_sigmask; /* mask last for extensibility */
568 struct sigframe
570 target_ulong pretcode;
571 int sig;
572 struct target_sigcontext sc;
573 struct target_fpstate fpstate;
574 target_ulong extramask[TARGET_NSIG_WORDS-1];
575 char retcode[8];
578 struct rt_sigframe
580 target_ulong pretcode;
581 int sig;
582 target_ulong pinfo;
583 target_ulong puc;
584 struct target_siginfo info;
585 struct target_ucontext uc;
586 struct target_fpstate fpstate;
587 char retcode[8];
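/* Both sigframe and rt_sigframe above mirror the layout used by the Linux
   i386 kernel, so that guest code (including any sa_restorer stub provided by
   the guest libc) finds the stack layout it expects. */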
591 * Set up a signal frame.
594 /* XXX: save x87 state */
595 static int
596 setup_sigcontext(struct target_sigcontext *sc, struct target_fpstate *fpstate,
597 CPUX86State *env, unsigned long mask)
599 int err = 0;
601 err |= __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs);
602 err |= __put_user(env->segs[R_FS].selector, (unsigned int *)&sc->fs);
603 err |= __put_user(env->segs[R_ES].selector, (unsigned int *)&sc->es);
604 err |= __put_user(env->segs[R_DS].selector, (unsigned int *)&sc->ds);
605 err |= __put_user(env->regs[R_EDI], &sc->edi);
606 err |= __put_user(env->regs[R_ESI], &sc->esi);
607 err |= __put_user(env->regs[R_EBP], &sc->ebp);
608 err |= __put_user(env->regs[R_ESP], &sc->esp);
609 err |= __put_user(env->regs[R_EBX], &sc->ebx);
610 err |= __put_user(env->regs[R_EDX], &sc->edx);
611 err |= __put_user(env->regs[R_ECX], &sc->ecx);
612 err |= __put_user(env->regs[R_EAX], &sc->eax);
613 err |= __put_user(env->exception_index, &sc->trapno);
614 err |= __put_user(env->error_code, &sc->err);
615 err |= __put_user(env->eip, &sc->eip);
616 err |= __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs);
617 err |= __put_user(env->eflags, &sc->eflags);
618 err |= __put_user(env->regs[R_ESP], &sc->esp_at_signal);
619 err |= __put_user(env->segs[R_SS].selector, (unsigned int *)&sc->ss);
621 cpu_x86_fsave(env, (void *)fpstate, 1);
622 fpstate->status = fpstate->sw;
623 err |= __put_user(0xffff, &fpstate->magic);
624 err |= __put_user(fpstate, &sc->fpstate);
626 /* non-iBCS2 extensions.. */
627 err |= __put_user(mask, &sc->oldmask);
628 err |= __put_user(env->cr[2], &sc->cr2);
629 return err;
633 * Determine which stack to use..
636 static inline void *
637 get_sigframe(struct emulated_sigaction *ka, CPUX86State *env, size_t frame_size)
639 unsigned long esp;
641 /* Default to using normal stack */
642 esp = env->regs[R_ESP];
643 #if 0
644 /* This is the X/Open sanctioned signal stack switching. */
645 if (ka->sa.sa_flags & SA_ONSTACK) {
646 if (sas_ss_flags(esp) == 0)
647 esp = current->sas_ss_sp + current->sas_ss_size;
650 /* This is the legacy signal stack switching. */
651 else
652 #endif
653 if ((env->segs[R_SS].selector & 0xffff) != __USER_DS &&
654 !(ka->sa.sa_flags & TARGET_SA_RESTORER) &&
655 ka->sa.sa_restorer) {
656 esp = (unsigned long) ka->sa.sa_restorer;
658 return g2h((esp - frame_size) & -8ul);
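/* g2h() turns the guest stack address into a host pointer so the frame can be
   filled in directly; masking with -8 keeps the frame 8-byte aligned, as the
   kernel does. */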
661 static void setup_frame(int sig, struct emulated_sigaction *ka,
662 target_sigset_t *set, CPUX86State *env)
664 struct sigframe *frame;
665 int i, err = 0;
667 frame = get_sigframe(ka, env, sizeof(*frame));
669 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
670 goto give_sigsegv;
671 err |= __put_user((/*current->exec_domain
672 && current->exec_domain->signal_invmap
673 && sig < 32
674 ? current->exec_domain->signal_invmap[sig]
675 : */ sig),
676 &frame->sig);
677 if (err)
678 goto give_sigsegv;
680 setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0]);
681 if (err)
682 goto give_sigsegv;
684 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
685 if (__put_user(set->sig[i], &frame->extramask[i - 1]))
686 goto give_sigsegv;
689 /* Set up to return from userspace. If provided, use a stub
690 already in userspace. */
691 if (ka->sa.sa_flags & TARGET_SA_RESTORER) {
692 err |= __put_user(ka->sa.sa_restorer, &frame->pretcode);
693 } else {
694 err |= __put_user(frame->retcode, &frame->pretcode);
695 /* This is popl %eax ; movl $,%eax ; int $0x80 */
696 err |= __put_user(0xb858, (short *)(frame->retcode+0));
697 #if defined(TARGET_X86_64)
698 #warning "Fix this !"
699 #else
700 err |= __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2));
701 #endif
702 err |= __put_user(0x80cd, (short *)(frame->retcode+6));
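/* retcode byte layout (little endian):
 *   0x58          popl %eax
 *   0xb8 imm32    movl $TARGET_NR_sigreturn, %eax
 *   0xcd 0x80     int $0x80
 * i.e. 0xb858 stores the first two opcode bytes and 0x80cd the final two. */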
705 if (err)
706 goto give_sigsegv;
708 /* Set up registers for signal handler */
709 env->regs[R_ESP] = h2g(frame);
710 env->eip = (unsigned long) ka->sa._sa_handler;
712 cpu_x86_load_seg(env, R_DS, __USER_DS);
713 cpu_x86_load_seg(env, R_ES, __USER_DS);
714 cpu_x86_load_seg(env, R_SS, __USER_DS);
715 cpu_x86_load_seg(env, R_CS, __USER_CS);
716 env->eflags &= ~TF_MASK;
718 return;
720 give_sigsegv:
721 if (sig == TARGET_SIGSEGV)
722 ka->sa._sa_handler = TARGET_SIG_DFL;
723 force_sig(TARGET_SIGSEGV /* , current */);
726 static void setup_rt_frame(int sig, struct emulated_sigaction *ka,
727 target_siginfo_t *info,
728 target_sigset_t *set, CPUX86State *env)
730 struct rt_sigframe *frame;
731 int i, err = 0;
733 frame = get_sigframe(ka, env, sizeof(*frame));
735 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
736 goto give_sigsegv;
738 err |= __put_user((/*current->exec_domain
739 && current->exec_domain->signal_invmap
740 && sig < 32
741 ? current->exec_domain->signal_invmap[sig]
742 : */sig),
743 &frame->sig);
744 err |= __put_user((target_ulong)&frame->info, &frame->pinfo);
745 err |= __put_user((target_ulong)&frame->uc, &frame->puc);
746 err |= copy_siginfo_to_user(&frame->info, info);
747 if (err)
748 goto give_sigsegv;
750 /* Create the ucontext. */
751 err |= __put_user(0, &frame->uc.tuc_flags);
752 err |= __put_user(0, &frame->uc.tuc_link);
753 err |= __put_user(/*current->sas_ss_sp*/ 0,
754 &frame->uc.tuc_stack.ss_sp);
755 err |= __put_user(/* sas_ss_flags(regs->esp) */ 0,
756 &frame->uc.tuc_stack.ss_flags);
757 err |= __put_user(/* current->sas_ss_size */ 0,
758 &frame->uc.tuc_stack.ss_size);
759 err |= setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate,
760 env, set->sig[0]);
761 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
762 if (__put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]))
763 goto give_sigsegv;
766 /* Set up to return from userspace. If provided, use a stub
767 already in userspace. */
768 if (ka->sa.sa_flags & TARGET_SA_RESTORER) {
769 err |= __put_user(ka->sa.sa_restorer, &frame->pretcode);
770 } else {
771 err |= __put_user(frame->retcode, &frame->pretcode);
772 /* This is movl $,%eax ; int $0x80 */
773 err |= __put_user(0xb8, (char *)(frame->retcode+0));
774 err |= __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1));
775 err |= __put_user(0x80cd, (short *)(frame->retcode+5));
778 if (err)
779 goto give_sigsegv;
781 /* Set up registers for signal handler */
782 env->regs[R_ESP] = (unsigned long) frame;
783 env->eip = (unsigned long) ka->sa._sa_handler;
785 cpu_x86_load_seg(env, R_DS, __USER_DS);
786 cpu_x86_load_seg(env, R_ES, __USER_DS);
787 cpu_x86_load_seg(env, R_SS, __USER_DS);
788 cpu_x86_load_seg(env, R_CS, __USER_CS);
789 env->eflags &= ~TF_MASK;
791 return;
793 give_sigsegv:
794 if (sig == TARGET_SIGSEGV)
795 ka->sa._sa_handler = TARGET_SIG_DFL;
796 force_sig(TARGET_SIGSEGV /* , current */);
799 static int
800 restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc, int *peax)
802 unsigned int err = 0;
804 cpu_x86_load_seg(env, R_GS, lduw(&sc->gs));
805 cpu_x86_load_seg(env, R_FS, lduw(&sc->fs));
806 cpu_x86_load_seg(env, R_ES, lduw(&sc->es));
807 cpu_x86_load_seg(env, R_DS, lduw(&sc->ds));
809 env->regs[R_EDI] = ldl(&sc->edi);
810 env->regs[R_ESI] = ldl(&sc->esi);
811 env->regs[R_EBP] = ldl(&sc->ebp);
812 env->regs[R_ESP] = ldl(&sc->esp);
813 env->regs[R_EBX] = ldl(&sc->ebx);
814 env->regs[R_EDX] = ldl(&sc->edx);
815 env->regs[R_ECX] = ldl(&sc->ecx);
816 env->eip = ldl(&sc->eip);
818 cpu_x86_load_seg(env, R_CS, lduw(&sc->cs) | 3);
819 cpu_x86_load_seg(env, R_SS, lduw(&sc->ss) | 3);
822 unsigned int tmpflags;
823 tmpflags = ldl(&sc->eflags);
824 env->eflags = (env->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
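/* 0x40DD5 selects only the flags user code may change (CF, PF, AF, ZF, SF,
   TF, DF, OF and AC); all other EFLAGS bits keep their current value. */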
825 // regs->orig_eax = -1; /* disable syscall checks */
829 struct _fpstate * buf;
830 buf = (void *)ldl(&sc->fpstate);
831 if (buf) {
832 #if 0
833 if (verify_area(VERIFY_READ, buf, sizeof(*buf)))
834 goto badframe;
835 #endif
836 cpu_x86_frstor(env, (void *)buf, 1);
840 *peax = ldl(&sc->eax);
841 return err;
842 #if 0
843 badframe:
844 return 1;
845 #endif
848 long do_sigreturn(CPUX86State *env)
850 struct sigframe *frame = (struct sigframe *)g2h(env->regs[R_ESP] - 8);
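/* When the handler returns, "ret" pops pretcode and the trampoline's
   "popl %eax" pops the sig argument, so ESP points 8 bytes past the start of
   the frame by the time sigreturn is reached; hence the -8 above. */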
851 target_sigset_t target_set;
852 sigset_t set;
853 int eax, i;
855 #if defined(DEBUG_SIGNAL)
856 fprintf(stderr, "do_sigreturn\n");
857 #endif
858 /* set blocked signals */
859 if (__get_user(target_set.sig[0], &frame->sc.oldmask))
860 goto badframe;
861 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
862 if (__get_user(target_set.sig[i], &frame->extramask[i - 1]))
863 goto badframe;
866 target_to_host_sigset_internal(&set, &target_set);
867 sigprocmask(SIG_SETMASK, &set, NULL);
869 /* restore registers */
870 if (restore_sigcontext(env, &frame->sc, &eax))
871 goto badframe;
872 return eax;
874 badframe:
875 force_sig(TARGET_SIGSEGV);
876 return 0;
879 long do_rt_sigreturn(CPUX86State *env)
881 struct rt_sigframe *frame = (struct rt_sigframe *)g2h(env->regs[R_ESP] - 4);
882 sigset_t set;
883 // stack_t st;
884 int eax;
886 #if 0
887 if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
888 goto badframe;
889 #endif
890 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
891 sigprocmask(SIG_SETMASK, &set, NULL);
893 if (restore_sigcontext(env, &frame->uc.tuc_mcontext, &eax))
894 goto badframe;
896 #if 0
897 if (__copy_from_user(&st, &frame->uc.tuc_stack, sizeof(st)))
898 goto badframe;
899 /* It is more difficult to avoid calling this function than to
900 call it and ignore errors. */
901 do_sigaltstack(&st, NULL, regs->esp);
902 #endif
903 return eax;
905 badframe:
906 force_sig(TARGET_SIGSEGV);
907 return 0;
910 #elif defined(TARGET_ARM)
912 struct target_sigcontext {
913 target_ulong trap_no;
914 target_ulong error_code;
915 target_ulong oldmask;
916 target_ulong arm_r0;
917 target_ulong arm_r1;
918 target_ulong arm_r2;
919 target_ulong arm_r3;
920 target_ulong arm_r4;
921 target_ulong arm_r5;
922 target_ulong arm_r6;
923 target_ulong arm_r7;
924 target_ulong arm_r8;
925 target_ulong arm_r9;
926 target_ulong arm_r10;
927 target_ulong arm_fp;
928 target_ulong arm_ip;
929 target_ulong arm_sp;
930 target_ulong arm_lr;
931 target_ulong arm_pc;
932 target_ulong arm_cpsr;
933 target_ulong fault_address;
936 typedef struct target_sigaltstack {
937 target_ulong ss_sp;
938 int ss_flags;
939 target_ulong ss_size;
940 } target_stack_t;
942 struct target_ucontext {
943 target_ulong tuc_flags;
944 target_ulong tuc_link;
945 target_stack_t tuc_stack;
946 struct target_sigcontext tuc_mcontext;
947 target_sigset_t tuc_sigmask; /* mask last for extensibility */
950 struct sigframe
952 struct target_sigcontext sc;
953 target_ulong extramask[TARGET_NSIG_WORDS-1];
954 target_ulong retcode;
957 struct rt_sigframe
959 struct target_siginfo *pinfo;
960 void *puc;
961 struct target_siginfo info;
962 struct target_ucontext uc;
963 target_ulong retcode;
966 #define TARGET_CONFIG_CPU_32 1
969 * For ARM syscalls, we encode the syscall number into the instruction.
971 #define SWI_SYS_SIGRETURN (0xef000000|(TARGET_NR_sigreturn + ARM_SYSCALL_BASE))
972 #define SWI_SYS_RT_SIGRETURN (0xef000000|(TARGET_NR_rt_sigreturn + ARM_SYSCALL_BASE))
975 * For Thumb syscalls, we pass the syscall number via r7. We therefore
976 * need two 16-bit instructions.
978 #define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_sigreturn))
979 #define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_rt_sigreturn))
981 static const target_ulong retcodes[4] = {
982 SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN,
983 SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN
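/* retcodes[] is indexed in setup_return() as idx = thumb (+ 2 if SA_SIGINFO
   is set), so the four entries cover {sigreturn, rt_sigreturn} x {ARM, Thumb}. */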
987 #define __put_user_error(x,p,e) __put_user(x, p)
988 #define __get_user_error(x,p,e) __get_user(x, p)
990 static inline int valid_user_regs(CPUState *regs)
992 return 1;
995 static int
996 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
997 CPUState *env, unsigned long mask)
999 int err = 0;
1001 __put_user_error(env->regs[0], &sc->arm_r0, err);
1002 __put_user_error(env->regs[1], &sc->arm_r1, err);
1003 __put_user_error(env->regs[2], &sc->arm_r2, err);
1004 __put_user_error(env->regs[3], &sc->arm_r3, err);
1005 __put_user_error(env->regs[4], &sc->arm_r4, err);
1006 __put_user_error(env->regs[5], &sc->arm_r5, err);
1007 __put_user_error(env->regs[6], &sc->arm_r6, err);
1008 __put_user_error(env->regs[7], &sc->arm_r7, err);
1009 __put_user_error(env->regs[8], &sc->arm_r8, err);
1010 __put_user_error(env->regs[9], &sc->arm_r9, err);
1011 __put_user_error(env->regs[10], &sc->arm_r10, err);
1012 __put_user_error(env->regs[11], &sc->arm_fp, err);
1013 __put_user_error(env->regs[12], &sc->arm_ip, err);
1014 __put_user_error(env->regs[13], &sc->arm_sp, err);
1015 __put_user_error(env->regs[14], &sc->arm_lr, err);
1016 __put_user_error(env->regs[15], &sc->arm_pc, err);
1017 #ifdef TARGET_CONFIG_CPU_32
1018 __put_user_error(cpsr_read(env), &sc->arm_cpsr, err);
1019 #endif
1021 __put_user_error(/* current->thread.trap_no */ 0, &sc->trap_no, err);
1022 __put_user_error(/* current->thread.error_code */ 0, &sc->error_code, err);
1023 __put_user_error(/* current->thread.address */ 0, &sc->fault_address, err);
1024 __put_user_error(mask, &sc->oldmask, err);
1026 return err;
1029 static inline void *
1030 get_sigframe(struct emulated_sigaction *ka, CPUState *regs, int framesize)
1032 unsigned long sp = regs->regs[13];
1034 #if 0
1036 * This is the X/Open sanctioned signal stack switching.
1038 if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp))
1039 sp = current->sas_ss_sp + current->sas_ss_size;
1040 #endif
1042 * ATPCS B01 mandates 8-byte alignment
1044 return g2h((sp - framesize) & ~7);
1047 static int
1048 setup_return(CPUState *env, struct emulated_sigaction *ka,
1049 target_ulong *rc, void *frame, int usig)
1051 target_ulong handler = (target_ulong)ka->sa._sa_handler;
1052 target_ulong retcode;
1053 int thumb = 0;
1054 #if defined(TARGET_CONFIG_CPU_32)
1055 #if 0
1056 target_ulong cpsr = env->cpsr;
1059 * Maybe we need to deliver a 32-bit signal to a 26-bit task.
1061 if (ka->sa.sa_flags & SA_THIRTYTWO)
1062 cpsr = (cpsr & ~MODE_MASK) | USR_MODE;
1064 #ifdef CONFIG_ARM_THUMB
1065 if (elf_hwcap & HWCAP_THUMB) {
1067 * The LSB of the handler determines if we're going to
1068 * be using THUMB or ARM mode for this signal handler.
1070 thumb = handler & 1;
1072 if (thumb)
1073 cpsr |= T_BIT;
1074 else
1075 cpsr &= ~T_BIT;
1077 #endif
1078 #endif
1079 #endif /* TARGET_CONFIG_CPU_32 */
1081 if (ka->sa.sa_flags & TARGET_SA_RESTORER) {
1082 retcode = (target_ulong)ka->sa.sa_restorer;
1083 } else {
1084 unsigned int idx = thumb;
1086 if (ka->sa.sa_flags & TARGET_SA_SIGINFO)
1087 idx += 2;
1089 if (__put_user(retcodes[idx], rc))
1090 return 1;
1091 #if 0
1092 flush_icache_range((target_ulong)rc,
1093 (target_ulong)(rc + 1));
1094 #endif
1095 retcode = ((target_ulong)rc) + thumb;
1098 env->regs[0] = usig;
1099 env->regs[13] = h2g(frame);
1100 env->regs[14] = retcode;
1101 env->regs[15] = handler & (thumb ? ~1 : ~3);
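/* The low bit of the handler address only selects Thumb mode, so it is
   stripped from the actual PC; in ARM mode the PC is kept word aligned.
   r14 holds the trampoline address, so returning from the handler jumps into
   the sigreturn stub written above. */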
1103 #if 0
1104 #ifdef TARGET_CONFIG_CPU_32
1105 env->cpsr = cpsr;
1106 #endif
1107 #endif
1109 return 0;
1112 static void setup_frame(int usig, struct emulated_sigaction *ka,
1113 target_sigset_t *set, CPUState *regs)
1115 struct sigframe *frame = get_sigframe(ka, regs, sizeof(*frame));
1116 int i, err = 0;
1118 err |= setup_sigcontext(&frame->sc, /*&frame->fpstate,*/ regs, set->sig[0]);
1120 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1121 if (__put_user(set->sig[i], &frame->extramask[i - 1]))
1122 return;
1125 if (err == 0)
1126 err = setup_return(regs, ka, &frame->retcode, frame, usig);
1127 // return err;
1130 static void setup_rt_frame(int usig, struct emulated_sigaction *ka,
1131 target_siginfo_t *info,
1132 target_sigset_t *set, CPUState *env)
1134 struct rt_sigframe *frame = get_sigframe(ka, env, sizeof(*frame));
1135 int i, err = 0;
1137 if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
1138 return /* 1 */;
1140 __put_user_error(&frame->info, (target_ulong *)&frame->pinfo, err);
1141 __put_user_error(&frame->uc, (target_ulong *)&frame->puc, err);
1142 err |= copy_siginfo_to_user(&frame->info, info);
1144 /* Clear all the bits of the ucontext we don't use. */
1145 memset(&frame->uc, 0, offsetof(struct target_ucontext, tuc_mcontext));
1147 err |= setup_sigcontext(&frame->uc.tuc_mcontext, /*&frame->fpstate,*/
1148 env, set->sig[0]);
1149 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1150 if (__put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]))
1151 return;
1154 if (err == 0)
1155 err = setup_return(env, ka, &frame->retcode, frame, usig);
1157 if (err == 0) {
1159 * For realtime signals we must also set the second and third
1160 * arguments for the signal handler.
1161 * -- Peter Maydell <pmaydell@chiark.greenend.org.uk> 2000-12-06
1163 env->regs[1] = (target_ulong)frame->pinfo;
1164 env->regs[2] = (target_ulong)frame->puc;
1167 // return err;
1170 static int
1171 restore_sigcontext(CPUState *env, struct target_sigcontext *sc)
1173 int err = 0;
1174 uint32_t cpsr;
1176 __get_user_error(env->regs[0], &sc->arm_r0, err);
1177 __get_user_error(env->regs[1], &sc->arm_r1, err);
1178 __get_user_error(env->regs[2], &sc->arm_r2, err);
1179 __get_user_error(env->regs[3], &sc->arm_r3, err);
1180 __get_user_error(env->regs[4], &sc->arm_r4, err);
1181 __get_user_error(env->regs[5], &sc->arm_r5, err);
1182 __get_user_error(env->regs[6], &sc->arm_r6, err);
1183 __get_user_error(env->regs[7], &sc->arm_r7, err);
1184 __get_user_error(env->regs[8], &sc->arm_r8, err);
1185 __get_user_error(env->regs[9], &sc->arm_r9, err);
1186 __get_user_error(env->regs[10], &sc->arm_r10, err);
1187 __get_user_error(env->regs[11], &sc->arm_fp, err);
1188 __get_user_error(env->regs[12], &sc->arm_ip, err);
1189 __get_user_error(env->regs[13], &sc->arm_sp, err);
1190 __get_user_error(env->regs[14], &sc->arm_lr, err);
1191 __get_user_error(env->regs[15], &sc->arm_pc, err);
1192 #ifdef TARGET_CONFIG_CPU_32
1193 __get_user_error(cpsr, &sc->arm_cpsr, err);
1194 cpsr_write(env, cpsr, 0xffffffff);
1195 #endif
1197 err |= !valid_user_regs(env);
1199 return err;
1202 long do_sigreturn(CPUState *env)
1204 struct sigframe *frame;
1205 target_sigset_t set;
1206 sigset_t host_set;
1207 int i;
1210 * Since we stacked the signal on a 64-bit boundary,
1211 * then 'sp' should be word aligned here. If it's
1212 * not, then the user is trying to mess with us.
1214 if (env->regs[13] & 7)
1215 goto badframe;
1217 frame = (struct sigframe *)g2h(env->regs[13]);
1219 #if 0
1220 if (verify_area(VERIFY_READ, frame, sizeof (*frame)))
1221 goto badframe;
1222 #endif
1223 if (__get_user(set.sig[0], &frame->sc.oldmask))
1224 goto badframe;
1225 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1226 if (__get_user(set.sig[i], &frame->extramask[i - 1]))
1227 goto badframe;
1230 target_to_host_sigset_internal(&host_set, &set);
1231 sigprocmask(SIG_SETMASK, &host_set, NULL);
1233 if (restore_sigcontext(env, &frame->sc))
1234 goto badframe;
1236 #if 0
1237 /* Send SIGTRAP if we're single-stepping */
1238 if (ptrace_cancel_bpt(current))
1239 send_sig(SIGTRAP, current, 1);
1240 #endif
1241 return env->regs[0];
1243 badframe:
1244 force_sig(SIGSEGV /* , current */);
1245 return 0;
1248 long do_rt_sigreturn(CPUState *env)
1250 struct rt_sigframe *frame;
1251 sigset_t host_set;
1254 * Since we stacked the signal on a 64-bit boundary,
1255 * then 'sp' should be word aligned here. If it's
1256 * not, then the user is trying to mess with us.
1258 if (env->regs[13] & 7)
1259 goto badframe;
1261 frame = (struct rt_sigframe *)env->regs[13];
1263 #if 0
1264 if (verify_area(VERIFY_READ, frame, sizeof (*frame)))
1265 goto badframe;
1266 #endif
1267 target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask);
1268 sigprocmask(SIG_SETMASK, &host_set, NULL);
1270 if (restore_sigcontext(env, &frame->uc.tuc_mcontext))
1271 goto badframe;
1273 #if 0
1274 /* Send SIGTRAP if we're single-stepping */
1275 if (ptrace_cancel_bpt(current))
1276 send_sig(SIGTRAP, current, 1);
1277 #endif
1278 return env->regs[0];
1280 badframe:
1281 force_sig(SIGSEGV /* , current */);
1282 return 0;
1285 #elif defined(TARGET_SPARC)
1287 #define __SUNOS_MAXWIN 31
1289 /* This is what SunOS does, so shall I. */
1290 struct target_sigcontext {
1291 target_ulong sigc_onstack; /* state to restore */
1293 target_ulong sigc_mask; /* sigmask to restore */
1294 target_ulong sigc_sp; /* stack pointer */
1295 target_ulong sigc_pc; /* program counter */
1296 target_ulong sigc_npc; /* next program counter */
1297 target_ulong sigc_psr; /* for condition codes etc */
1298 target_ulong sigc_g1; /* User uses these two registers */
1299 target_ulong sigc_o0; /* within the trampoline code. */
1301 /* Now comes information regarding the user's window set
1302 * at the time of the signal.
1304 target_ulong sigc_oswins; /* outstanding windows */
1306 /* stack ptrs for each regwin buf */
1307 char *sigc_spbuf[__SUNOS_MAXWIN];
1309 /* Windows to restore after signal */
1310 struct {
1311 target_ulong locals[8];
1312 target_ulong ins[8];
1313 } sigc_wbuf[__SUNOS_MAXWIN];
1315 /* A Sparc stack frame */
1316 struct sparc_stackf {
1317 target_ulong locals[8];
1318 target_ulong ins[6];
1319 struct sparc_stackf *fp;
1320 target_ulong callers_pc;
1321 char *structptr;
1322 target_ulong xargs[6];
1323 target_ulong xxargs[1];
1326 typedef struct {
1327 struct {
1328 target_ulong psr;
1329 target_ulong pc;
1330 target_ulong npc;
1331 target_ulong y;
1332 target_ulong u_regs[16]; /* globals and ins */
1333 } si_regs;
1334 int si_mask;
1335 } __siginfo_t;
1337 typedef struct {
1338 unsigned long si_float_regs [32];
1339 unsigned long si_fsr;
1340 unsigned long si_fpqdepth;
1341 struct {
1342 unsigned long *insn_addr;
1343 unsigned long insn;
1344 } si_fpqueue [16];
1345 } qemu_siginfo_fpu_t;
1348 struct target_signal_frame {
1349 struct sparc_stackf ss;
1350 __siginfo_t info;
1351 qemu_siginfo_fpu_t *fpu_save;
1352 target_ulong insns[2] __attribute__ ((aligned (8)));
1353 target_ulong extramask[TARGET_NSIG_WORDS - 1];
1354 target_ulong extra_size; /* Should be 0 */
1355 qemu_siginfo_fpu_t fpu_state;
1357 struct target_rt_signal_frame {
1358 struct sparc_stackf ss;
1359 siginfo_t info;
1360 target_ulong regs[20];
1361 sigset_t mask;
1362 qemu_siginfo_fpu_t *fpu_save;
1363 unsigned int insns[2];
1364 stack_t stack;
1365 unsigned int extra_size; /* Should be 0 */
1366 qemu_siginfo_fpu_t fpu_state;
1369 #define UREG_O0 16
1370 #define UREG_O6 22
1371 #define UREG_I0 0
1372 #define UREG_I1 1
1373 #define UREG_I2 2
1374 #define UREG_I6 6
1375 #define UREG_I7 7
1376 #define UREG_L0 8
1377 #define UREG_FP UREG_I6
1378 #define UREG_SP UREG_O6
1380 static inline void *get_sigframe(struct emulated_sigaction *sa, CPUState *env, unsigned long framesize)
1382 unsigned long sp;
1384 sp = env->regwptr[UREG_FP];
1385 #if 0
1387 /* This is the X/Open sanctioned signal stack switching. */
1388 if (sa->sa_flags & TARGET_SA_ONSTACK) {
1389 if (!on_sig_stack(sp) && !((current->sas_ss_sp + current->sas_ss_size) & 7))
1390 sp = current->sas_ss_sp + current->sas_ss_size;
1392 #endif
1393 return g2h(sp - framesize);
1396 static int
1397 setup___siginfo(__siginfo_t *si, CPUState *env, target_ulong mask)
1399 int err = 0, i;
1401 err |= __put_user(env->psr, &si->si_regs.psr);
1402 err |= __put_user(env->pc, &si->si_regs.pc);
1403 err |= __put_user(env->npc, &si->si_regs.npc);
1404 err |= __put_user(env->y, &si->si_regs.y);
1405 for (i=0; i < 8; i++) {
1406 err |= __put_user(env->gregs[i], &si->si_regs.u_regs[i]);
1408 for (i=0; i < 8; i++) {
1409 err |= __put_user(env->regwptr[UREG_I0 + i], &si->si_regs.u_regs[i+8]);
1411 err |= __put_user(mask, &si->si_mask);
1412 return err;
1415 #if 0
1416 static int
1417 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
1418 CPUState *env, unsigned long mask)
1420 int err = 0;
1422 err |= __put_user(mask, &sc->sigc_mask);
1423 err |= __put_user(env->regwptr[UREG_SP], &sc->sigc_sp);
1424 err |= __put_user(env->pc, &sc->sigc_pc);
1425 err |= __put_user(env->npc, &sc->sigc_npc);
1426 err |= __put_user(env->psr, &sc->sigc_psr);
1427 err |= __put_user(env->gregs[1], &sc->sigc_g1);
1428 err |= __put_user(env->regwptr[UREG_O0], &sc->sigc_o0);
1430 return err;
1432 #endif
1433 #define NF_ALIGNEDSZ (((sizeof(struct target_signal_frame) + 7) & (~7)))
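/* Round the frame size up to a multiple of 8 bytes so the signal frame placed
   below the guest %sp stays 8-byte aligned. */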
1435 static void setup_frame(int sig, struct emulated_sigaction *ka,
1436 target_sigset_t *set, CPUState *env)
1438 struct target_signal_frame *sf;
1439 int sigframe_size, err, i;
1441 /* 1. Make sure everything is clean */
1442 //synchronize_user_stack();
1444 sigframe_size = NF_ALIGNEDSZ;
1446 sf = (struct target_signal_frame *)
1447 get_sigframe(ka, env, sigframe_size);
1449 //fprintf(stderr, "sf: %x pc %x fp %x sp %x\n", sf, env->pc, env->regwptr[UREG_FP], env->regwptr[UREG_SP]);
1450 #if 0
1451 if (invalid_frame_pointer(sf, sigframe_size))
1452 goto sigill_and_return;
1453 #endif
1454 /* 2. Save the current process state */
1455 err = setup___siginfo(&sf->info, env, set->sig[0]);
1456 err |= __put_user(0, &sf->extra_size);
1458 //err |= save_fpu_state(regs, &sf->fpu_state);
1459 //err |= __put_user(&sf->fpu_state, &sf->fpu_save);
1461 err |= __put_user(set->sig[0], &sf->info.si_mask);
1462 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
1463 err |= __put_user(set->sig[i + 1], &sf->extramask[i]);
1466 for (i = 0; i < 8; i++) {
1467 err |= __put_user(env->regwptr[i + UREG_L0], &sf->ss.locals[i]);
1469 for (i = 0; i < 8; i++) {
1470 err |= __put_user(env->regwptr[i + UREG_I0], &sf->ss.ins[i]);
1472 if (err)
1473 goto sigsegv;
1475 /* 3. signal handler back-trampoline and parameters */
1476 env->regwptr[UREG_FP] = h2g(sf);
1477 env->regwptr[UREG_I0] = sig;
1478 env->regwptr[UREG_I1] = h2g(&sf->info);
1479 env->regwptr[UREG_I2] = h2g(&sf->info);
1481 /* 4. signal handler */
1482 env->pc = (unsigned long) ka->sa._sa_handler;
1483 env->npc = (env->pc + 4);
1484 /* 5. return to kernel instructions */
1485 if (ka->sa.sa_restorer)
1486 env->regwptr[UREG_I7] = (unsigned long)ka->sa.sa_restorer;
1487 else {
1488 env->regwptr[UREG_I7] = h2g(&(sf->insns[0]) - 2);
1490 /* mov __NR_sigreturn, %g1 */
1491 err |= __put_user(0x821020d8, &sf->insns[0]);
1493 /* t 0x10 */
1494 err |= __put_user(0x91d02010, &sf->insns[1]);
1495 if (err)
1496 goto sigsegv;
1498 /* Flush instruction space. */
1499 //flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
1500 // tb_flush(env);
1502 return;
1504 //sigill_and_return:
1505 force_sig(TARGET_SIGILL);
1506 sigsegv:
1507 //fprintf(stderr, "force_sig\n");
1508 force_sig(TARGET_SIGSEGV);
1510 static inline int
1511 restore_fpu_state(CPUState *env, qemu_siginfo_fpu_t *fpu)
1513 int err;
1514 #if 0
1515 #ifdef CONFIG_SMP
1516 if (current->flags & PF_USEDFPU)
1517 regs->psr &= ~PSR_EF;
1518 #else
1519 if (current == last_task_used_math) {
1520 last_task_used_math = 0;
1521 regs->psr &= ~PSR_EF;
1523 #endif
1524 current->used_math = 1;
1525 current->flags &= ~PF_USEDFPU;
1526 #endif
1527 #if 0
1528 if (verify_area (VERIFY_READ, fpu, sizeof(*fpu)))
1529 return -EFAULT;
1530 #endif
1532 #if 0
1533 /* XXX: incorrect */
1534 err = __copy_from_user(&env->fpr[0], &fpu->si_float_regs[0],
1535 (sizeof(unsigned long) * 32));
1536 #endif
1537 err |= __get_user(env->fsr, &fpu->si_fsr);
1538 #if 0
1539 err |= __get_user(current->thread.fpqdepth, &fpu->si_fpqdepth);
1540 if (current->thread.fpqdepth != 0)
1541 err |= __copy_from_user(&current->thread.fpqueue[0],
1542 &fpu->si_fpqueue[0],
1543 ((sizeof(unsigned long) +
1544 (sizeof(unsigned long *)))*16));
1545 #endif
1546 return err;
1550 static void setup_rt_frame(int sig, struct emulated_sigaction *ka,
1551 target_siginfo_t *info,
1552 target_sigset_t *set, CPUState *env)
1554 fprintf(stderr, "setup_rt_frame: not implemented\n");
1557 long do_sigreturn(CPUState *env)
1559 struct target_signal_frame *sf;
1560 uint32_t up_psr, pc, npc;
1561 target_sigset_t set;
1562 sigset_t host_set;
1563 target_ulong fpu_save;
1564 int err, i;
1566 sf = (struct target_signal_frame *)g2h(env->regwptr[UREG_FP]);
1567 #if 0
1568 fprintf(stderr, "sigreturn\n");
1569 fprintf(stderr, "sf: %x pc %x fp %x sp %x\n", sf, env->pc, env->regwptr[UREG_FP], env->regwptr[UREG_SP]);
1570 #endif
1571 //cpu_dump_state(env, stderr, fprintf, 0);
1573 /* 1. Make sure we are not getting garbage from the user */
1574 #if 0
1575 if (verify_area (VERIFY_READ, sf, sizeof (*sf)))
1576 goto segv_and_exit;
1577 #endif
1579 if (((uint) sf) & 3)
1580 goto segv_and_exit;
1582 err = __get_user(pc, &sf->info.si_regs.pc);
1583 err |= __get_user(npc, &sf->info.si_regs.npc);
1585 if ((pc | npc) & 3)
1586 goto segv_and_exit;
1588 /* 2. Restore the state */
1589 err |= __get_user(up_psr, &sf->info.si_regs.psr);
1591 /* User can only change condition codes and FPU enabling in %psr. */
1592 env->psr = (up_psr & (PSR_ICC /* | PSR_EF */))
1593 | (env->psr & ~(PSR_ICC /* | PSR_EF */));
1595 env->pc = pc;
1596 env->npc = npc;
1597 err |= __get_user(env->y, &sf->info.si_regs.y);
1598 for (i=0; i < 8; i++) {
1599 err |= __get_user(env->gregs[i], &sf->info.si_regs.u_regs[i]);
1601 for (i=0; i < 8; i++) {
1602 err |= __get_user(env->regwptr[i + UREG_I0], &sf->info.si_regs.u_regs[i+8]);
1605 err |= __get_user(fpu_save, (target_ulong *)&sf->fpu_save);
1607 //if (fpu_save)
1608 // err |= restore_fpu_state(env, fpu_save);
1610 /* This is pretty much atomic, no amount of locking would prevent
1611 * the races which exist anyways.
1613 err |= __get_user(set.sig[0], &sf->info.si_mask);
1614 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1615 err |= (__get_user(set.sig[i], &sf->extramask[i - 1]));
1618 target_to_host_sigset_internal(&host_set, &set);
1619 sigprocmask(SIG_SETMASK, &host_set, NULL);
1621 if (err)
1622 goto segv_and_exit;
1624 return env->regwptr[0];
1626 segv_and_exit:
1627 force_sig(TARGET_SIGSEGV);
1630 long do_rt_sigreturn(CPUState *env)
1632 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
1633 return -ENOSYS;
1636 #elif defined(TARGET_MIPS)
1638 struct target_sigcontext {
1639 uint32_t sc_regmask; /* Unused */
1640 uint32_t sc_status;
1641 uint64_t sc_pc;
1642 uint64_t sc_regs[32];
1643 uint64_t sc_fpregs[32];
1644 uint32_t sc_ownedfp; /* Unused */
1645 uint32_t sc_fpc_csr;
1646 uint32_t sc_fpc_eir; /* Unused */
1647 uint32_t sc_used_math;
1648 uint32_t sc_dsp; /* dsp status, was sc_ssflags */
1649 uint64_t sc_mdhi;
1650 uint64_t sc_mdlo;
1651 target_ulong sc_hi1; /* Was sc_cause */
1652 target_ulong sc_lo1; /* Was sc_badvaddr */
1653 target_ulong sc_hi2; /* Was sc_sigset[4] */
1654 target_ulong sc_lo2;
1655 target_ulong sc_hi3;
1656 target_ulong sc_lo3;
1659 struct sigframe {
1660 uint32_t sf_ass[4]; /* argument save space for o32 */
1661 uint32_t sf_code[2]; /* signal trampoline */
1662 struct target_sigcontext sf_sc;
1663 target_sigset_t sf_mask;
1666 /* Install trampoline to jump back from signal handler */
1667 static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall)
1669 int err;
1672 * Set up the return code ...
1674 * li v0, __NR__foo_sigreturn
1675 * syscall
1678 err = __put_user(0x24020000 + syscall, tramp + 0);
1679 err |= __put_user(0x0000000c , tramp + 1);
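/* Encoding of the trampoline: 0x24020000 | nr is "addiu $v0, $zero, nr"
   (the "li v0, nr" described above) and 0x0000000c is the "syscall"
   instruction. */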
1680 /* flush_cache_sigtramp((unsigned long) tramp); */
1681 return err;
1684 static inline int
1685 setup_sigcontext(CPUState *regs, struct target_sigcontext *sc)
1687 int err = 0;
1689 err |= __put_user(regs->PC[regs->current_tc], &sc->sc_pc);
1691 #define save_gp_reg(i) do { \
1692 err |= __put_user(regs->gpr[i][regs->current_tc], &sc->sc_regs[i]); \
1693 } while(0)
1694 __put_user(0, &sc->sc_regs[0]); save_gp_reg(1); save_gp_reg(2);
1695 save_gp_reg(3); save_gp_reg(4); save_gp_reg(5); save_gp_reg(6);
1696 save_gp_reg(7); save_gp_reg(8); save_gp_reg(9); save_gp_reg(10);
1697 save_gp_reg(11); save_gp_reg(12); save_gp_reg(13); save_gp_reg(14);
1698 save_gp_reg(15); save_gp_reg(16); save_gp_reg(17); save_gp_reg(18);
1699 save_gp_reg(19); save_gp_reg(20); save_gp_reg(21); save_gp_reg(22);
1700 save_gp_reg(23); save_gp_reg(24); save_gp_reg(25); save_gp_reg(26);
1701 save_gp_reg(27); save_gp_reg(28); save_gp_reg(29); save_gp_reg(30);
1702 save_gp_reg(31);
1703 #undef save_gp_reg
1705 err |= __put_user(regs->HI[0][regs->current_tc], &sc->sc_mdhi);
1706 err |= __put_user(regs->LO[0][regs->current_tc], &sc->sc_mdlo);
1708 /* Not used yet, but might be useful if we ever have DSP support */
1709 #if 0
1710 if (cpu_has_dsp) {
1711 err |= __put_user(mfhi1(), &sc->sc_hi1);
1712 err |= __put_user(mflo1(), &sc->sc_lo1);
1713 err |= __put_user(mfhi2(), &sc->sc_hi2);
1714 err |= __put_user(mflo2(), &sc->sc_lo2);
1715 err |= __put_user(mfhi3(), &sc->sc_hi3);
1716 err |= __put_user(mflo3(), &sc->sc_lo3);
1717 err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
1719 /* same with 64 bit */
1720 #ifdef CONFIG_64BIT
1721 err |= __put_user(regs->hi, &sc->sc_hi[0]);
1722 err |= __put_user(regs->lo, &sc->sc_lo[0]);
1723 if (cpu_has_dsp) {
1724 err |= __put_user(mfhi1(), &sc->sc_hi[1]);
1725 err |= __put_user(mflo1(), &sc->sc_lo[1]);
1726 err |= __put_user(mfhi2(), &sc->sc_hi[2]);
1727 err |= __put_user(mflo2(), &sc->sc_lo[2]);
1728 err |= __put_user(mfhi3(), &sc->sc_hi[3]);
1729 err |= __put_user(mflo3(), &sc->sc_lo[3]);
1730 err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
1732 #endif
1733 #endif
1735 #if 0
1736 err |= __put_user(!!used_math(), &sc->sc_used_math);
1738 if (!used_math())
1739 goto out;
1742 * Save FPU state to signal context. Signal handler will "inherit"
1743 * current FPU state.
1745 preempt_disable();
1747 if (!is_fpu_owner()) {
1748 own_fpu();
1749 restore_fp(current);
1751 err |= save_fp_context(sc);
1753 preempt_enable();
1754 out:
1755 #endif
1756 return err;
1759 static inline int
1760 restore_sigcontext(CPUState *regs, struct target_sigcontext *sc)
1762 int err = 0;
1764 err |= __get_user(regs->CP0_EPC, &sc->sc_pc);
1766 err |= __get_user(regs->HI[0][regs->current_tc], &sc->sc_mdhi);
1767 err |= __get_user(regs->LO[0][regs->current_tc], &sc->sc_mdlo);
1769 #define restore_gp_reg(i) do { \
1770 err |= __get_user(regs->gpr[i][regs->current_tc], &sc->sc_regs[i]); \
1771 } while(0)
1772 restore_gp_reg( 1); restore_gp_reg( 2); restore_gp_reg( 3);
1773 restore_gp_reg( 4); restore_gp_reg( 5); restore_gp_reg( 6);
1774 restore_gp_reg( 7); restore_gp_reg( 8); restore_gp_reg( 9);
1775 restore_gp_reg(10); restore_gp_reg(11); restore_gp_reg(12);
1776 restore_gp_reg(13); restore_gp_reg(14); restore_gp_reg(15);
1777 restore_gp_reg(16); restore_gp_reg(17); restore_gp_reg(18);
1778 restore_gp_reg(19); restore_gp_reg(20); restore_gp_reg(21);
1779 restore_gp_reg(22); restore_gp_reg(23); restore_gp_reg(24);
1780 restore_gp_reg(25); restore_gp_reg(26); restore_gp_reg(27);
1781 restore_gp_reg(28); restore_gp_reg(29); restore_gp_reg(30);
1782 restore_gp_reg(31);
1783 #undef restore_gp_reg
1785 #if 0
1786 if (cpu_has_dsp) {
1787 err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
1788 err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
1789 err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
1790 err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
1791 err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
1792 err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
1793 err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
1795 #ifdef CONFIG_64BIT
1796 err |= __get_user(regs->hi, &sc->sc_hi[0]);
1797 err |= __get_user(regs->lo, &sc->sc_lo[0]);
1798 if (cpu_has_dsp) {
1799 err |= __get_user(treg, &sc->sc_hi[1]); mthi1(treg);
1800 err |= __get_user(treg, &sc->sc_lo[1]); mthi1(treg);
1801 err |= __get_user(treg, &sc->sc_hi[2]); mthi2(treg);
1802 err |= __get_user(treg, &sc->sc_lo[2]); mthi2(treg);
1803 err |= __get_user(treg, &sc->sc_hi[3]); mthi3(treg);
1804 err |= __get_user(treg, &sc->sc_lo[3]); mthi3(treg);
1805 err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
1807 #endif
1809 err |= __get_user(used_math, &sc->sc_used_math);
1810 conditional_used_math(used_math);
1812 preempt_disable();
1814 if (used_math()) {
1815 /* restore fpu context if we have used it before */
1816 own_fpu();
1817 err |= restore_fp_context(sc);
1818 } else {
1819 /* signal handler may have used FPU. Give it up. */
1820 lose_fpu();
1823 preempt_enable();
1824 #endif
1825 return err;
1828 * Determine which stack to use..
1830 static inline void *
1831 get_sigframe(struct emulated_sigaction *ka, CPUState *regs, size_t frame_size)
1833 unsigned long sp;
1835 /* Default to using normal stack */
1836 sp = regs->gpr[29][regs->current_tc];
1839 * FPU emulator may have its own trampoline active just
1840 * above the user stack, 16-bytes before the next lowest
1841 * 16 byte boundary. Try to avoid trashing it.
1843 sp -= 32;
1845 #if 0
1846 /* This is the X/Open sanctioned signal stack switching. */
1847 if ((ka->sa.sa_flags & SA_ONSTACK) && (sas_ss_flags (sp) == 0))
1848 sp = current->sas_ss_sp + current->sas_ss_size;
1849 #endif
1851 return g2h((sp - frame_size) & ~7);
1854 static void setup_frame(int sig, struct emulated_sigaction * ka,
1855 target_sigset_t *set, CPUState *regs)
1857 struct sigframe *frame;
1858 int i;
1860 frame = get_sigframe(ka, regs, sizeof(*frame));
1861 if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
1862 goto give_sigsegv;
1864 install_sigtramp(frame->sf_code, TARGET_NR_sigreturn);
1866 if(setup_sigcontext(regs, &frame->sf_sc))
1867 goto give_sigsegv;
1869 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1870 if(__put_user(set->sig[i], &frame->sf_mask.sig[i]))
1871 goto give_sigsegv;
1875 * Arguments to signal handler:
1877 * a0 = signal number
1878 * a1 = 0 (should be cause)
1879 * a2 = pointer to struct sigcontext
1881 * $25 and PC point to the signal handler, $29 points to the
1882 * struct sigframe.
1884 regs->gpr[ 4][regs->current_tc] = sig;
1885 regs->gpr[ 5][regs->current_tc] = 0;
1886 regs->gpr[ 6][regs->current_tc] = h2g(&frame->sf_sc);
1887 regs->gpr[29][regs->current_tc] = h2g(frame);
1888 regs->gpr[31][regs->current_tc] = h2g(frame->sf_code);
1889 /* The original kernel code sets CP0_EPC to the handler,
1890 * since it returns to userland using eret;
1891 * we cannot do that here, so we must set PC directly */
1892 regs->PC[regs->current_tc] = regs->gpr[25][regs->current_tc] = ka->sa._sa_handler;
1893 return;
1895 give_sigsegv:
1896 force_sig(TARGET_SIGSEGV/*, current*/);
1897 return;
1900 long do_sigreturn(CPUState *regs)
1902 struct sigframe *frame;
1903 sigset_t blocked;
1904 target_sigset_t target_set;
1905 int i;
1907 #if defined(DEBUG_SIGNAL)
1908 fprintf(stderr, "do_sigreturn\n");
1909 #endif
1910 frame = (struct sigframe *) regs->gpr[29][regs->current_tc];
1911 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
1912 goto badframe;
1914 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1915 if(__get_user(target_set.sig[i], &frame->sf_mask.sig[i]))
1916 goto badframe;
1919 target_to_host_sigset_internal(&blocked, &target_set);
1920 sigprocmask(SIG_SETMASK, &blocked, NULL);
1922 if (restore_sigcontext(regs, &frame->sf_sc))
1923 goto badframe;
1925 #if 0
1927 * Don't let your children do this ...
1929 __asm__ __volatile__(
1930 "move\t$29, %0\n\t"
1931 "j\tsyscall_exit"
1932 :/* no outputs */
1933 :"r" (&regs));
1934 /* Unreached */
1935 #endif
1937 regs->PC[regs->current_tc] = regs->CP0_EPC;
1938 /* I am not sure this is right, but it seems to work
1939 * maybe a problem with nested signals? */
1940 regs->CP0_EPC = 0;
1941 return 0;
1943 badframe:
1944 force_sig(TARGET_SIGSEGV/*, current*/);
1945 return 0;
1948 static void setup_rt_frame(int sig, struct emulated_sigaction *ka,
1949 target_siginfo_t *info,
1950 target_sigset_t *set, CPUState *env)
1952 fprintf(stderr, "setup_rt_frame: not implemented\n");
1955 long do_rt_sigreturn(CPUState *env)
1957 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
1958 return -ENOSYS;
1961 #else
1963 static void setup_frame(int sig, struct emulated_sigaction *ka,
1964 target_sigset_t *set, CPUState *env)
1966 fprintf(stderr, "setup_frame: not implemented\n");
1969 static void setup_rt_frame(int sig, struct emulated_sigaction *ka,
1970 target_siginfo_t *info,
1971 target_sigset_t *set, CPUState *env)
1973 fprintf(stderr, "setup_rt_frame: not implemented\n");
1976 long do_sigreturn(CPUState *env)
1978 fprintf(stderr, "do_sigreturn: not implemented\n");
1979 return -ENOSYS;
1982 long do_rt_sigreturn(CPUState *env)
1984 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
1985 return -ENOSYS;
1988 #endif
1990 void process_pending_signals(void *cpu_env)
1992 int sig;
1993 target_ulong handler;
1994 sigset_t set, old_set;
1995 target_sigset_t target_old_set;
1996 struct emulated_sigaction *k;
1997 struct sigqueue *q;
1999 if (!signal_pending)
2000 return;
2002 k = sigact_table;
2003 for(sig = 1; sig <= TARGET_NSIG; sig++) {
2004 if (k->pending)
2005 goto handle_signal;
2006 k++;
2008 /* if no signal is pending, just return */
2009 signal_pending = 0;
2010 return;
2012 handle_signal:
2013 #ifdef DEBUG_SIGNAL
2014 fprintf(stderr, "qemu: process signal %d\n", sig);
2015 #endif
2016 /* dequeue signal */
2017 q = k->first;
2018 k->first = q->next;
2019 if (!k->first)
2020 k->pending = 0;
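/* Only one queued entry is delivered per call; if more entries remain,
   pending stays set and signal_pending is still non-zero, so the next call to
   process_pending_signals() will deliver the next one. */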
2022 sig = gdb_handlesig (cpu_env, sig);
2023 if (!sig) {
2024 fprintf (stderr, "Lost signal\n");
2025 abort();
2028 handler = k->sa._sa_handler;
2029 if (handler == TARGET_SIG_DFL) {
2030 /* default handler: ignore some signals, the others are fatal */
2031 if (sig != TARGET_SIGCHLD &&
2032 sig != TARGET_SIGURG &&
2033 sig != TARGET_SIGWINCH) {
2034 force_sig(sig);
2036 } else if (handler == TARGET_SIG_IGN) {
2037 /* ignore sig */
2038 } else if (handler == TARGET_SIG_ERR) {
2039 force_sig(sig);
2040 } else {
2041 /* compute the blocked signals during the handler execution */
2042 target_to_host_sigset(&set, &k->sa.sa_mask);
2043 /* SA_NODEFER indicates that the current signal should not be
2044 blocked during the handler */
2045 if (!(k->sa.sa_flags & TARGET_SA_NODEFER))
2046 sigaddset(&set, target_to_host_signal(sig));
2048 /* block signals in the handler using Linux */
2049 sigprocmask(SIG_BLOCK, &set, &old_set);
2050 /* save the previous blocked signal state to restore it at the
2051 end of the signal execution (see do_sigreturn) */
2052 host_to_target_sigset_internal(&target_old_set, &old_set);
2054 /* if the CPU is in VM86 mode, we restore the 32 bit values */
2055 #if defined(TARGET_I386) && !defined(TARGET_X86_64)
2057 CPUX86State *env = cpu_env;
2058 if (env->eflags & VM_MASK)
2059 save_v86_state(env);
2061 #endif
2062 /* prepare the stack frame of the virtual CPU */
2063 if (k->sa.sa_flags & TARGET_SA_SIGINFO)
2064 setup_rt_frame(sig, k, &q->info, &target_old_set, cpu_env);
2065 else
2066 setup_frame(sig, k, &target_old_set, cpu_env);
2067 if (k->sa.sa_flags & TARGET_SA_RESETHAND)
2068 k->sa._sa_handler = TARGET_SIG_DFL;
2070 if (q != &k->info)
2071 free_sigqueue(q);