/* linux/arch/e2k/kernel/signal.c, v 1.10 08/21/2001.
 *
 * Copyright (C) 2001 MCST
 */
7 #include <linux/context_tracking.h>
8 #include <linux/sched.h>
9 #include <linux/errno.h>
10 #include <linux/signal.h>
11 #include <linux/ptrace.h>
12 #include <linux/tracehook.h>
13 #include <linux/irqflags.h>
14 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
15 # include <linux/ftrace.h>
16 #endif
18 #include <asm/cpu_regs.h>
19 #include <asm/e2k_syswork.h>
20 #include <asm/uaccess.h>
21 #include <asm/process.h>
22 #include <asm/regs_state.h>
23 #include <asm/sge.h>
24 #include <asm/ucontext.h>
25 #include <linux/unistd.h>
26 #include <asm/lms.h>
27 #ifdef CONFIG_PROTECTED_MODE
28 #include <asm/3p.h>
29 #include <asm/e2k_ptypes.h>
30 #endif /* CONFIG_PROTECTED_MODE */
31 #include <asm/traps.h>
32 #include <asm/e2k_debug.h>
34 #undef DEBUG_SIG_MODE
35 #undef DebugSig
36 #define DEBUG_SIG_MODE 0 /* Signal handling */
37 #define DebugSig(...) DebugPrint(DEBUG_SIG_MODE ,##__VA_ARGS__)
39 #undef DEBUG_HS_MODE
40 #undef DebugHS
41 #define DEBUG_HS_MODE 0 /* Signal handling */
42 #define DebugHS(...) DebugPrint(DEBUG_HS_MODE ,##__VA_ARGS__)
44 #undef DEBUG_SLJ_MODE
45 #undef DebugSLJ
46 #define DEBUG_SLJ_MODE 0 /* Signal long jump handling */
47 #define DebugSLJ(...) DebugPrint(DEBUG_SLJ_MODE ,##__VA_ARGS__)
49 #define DEBUG_FTRACE_MODE 0
50 #if DEBUG_FTRACE_MODE
51 # define DebugFTRACE(...) pr_info(__VA_ARGS__)
52 #else
53 # define DebugFTRACE(...)
54 #endif
56 #define DEBUG_SRT_MODE 0 /* Signal return handling */
57 #define DebugSRT(...) DebugPrint(DEBUG_SRT_MODE ,##__VA_ARGS__)
59 #define DEBUG_CTX_MODE 0 /* setcontext/swapcontext */
60 #if DEBUG_CTX_MODE
61 #define DebugCTX(...) DebugPrint(DEBUG_CTX_MODE ,##__VA_ARGS__)
62 #else
63 #define DebugCTX(...)
64 #endif
typedef struct rt_sigframe {
	siginfo_t	info;
	union {
		struct ucontext		uc;
#ifdef CONFIG_PROTECTED_MODE
		struct ucontext_prot	__pad;
#endif
	};
	sigset_t	saved_set;
} rt_sigframe_t;
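
/*
 * Illustrative sketch (not part of this file): from user space the
 * rt_sigframe built below is what an SA_SIGINFO handler sees through its
 * siginfo_t * and void *ucontext arguments.  A minimal handler reading the
 * saved return IP could look like the snippet below; the uc_mcontext.cr0_hi
 * field path follows do_sigreturn() further down in this file.
 *
 *	static void handler(int sig, siginfo_t *si, void *ctx)
 *	{
 *		struct ucontext *uc = ctx;
 *		unsigned long long cr0_hi = uc->uc_mcontext.cr0_hi;
 *		// the ip field of cr0_hi holds the return address >> 3
 *		// (see go2user() below)
 *	}
 *
 *	struct sigaction sa = {
 *		.sa_sigaction	= handler,
 *		.sa_flags	= SA_SIGINFO,
 *	};
 *	sigaction(SIGUSR1, &sa, NULL);
 */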
77 extern int constrict_hardware_stacks(pt_regs_t *curr_regs, pt_regs_t *user_env);
78 extern int go_hd_stk_down(e2k_psp_hi_t psp_hi,
79 e2k_pcsp_lo_t pcsp_lo, e2k_pcsp_hi_t pcsp_hi,
80 int down,
81 e2k_addr_t *psp_ind, e2k_addr_t *pcsp_ind,
82 e2k_size_t *wd_psize, int *sw_num_p,
83 e2k_mem_crs_t *crs, int user_stacks);
85 static int do_sigreturn(pt_regs_t *regs, unsigned long signo,
86 rt_sigframe_t *user_sigframe);
89 void
90 sig_to_exit(int errno)
92 struct siginfo si;
93 struct k_sigaction *ka;
95 DebugSig("start\n");
97 ka = &current->sighand->action[SIGSEGV-1];
98 ka->sa.sa_handler = SIG_DFL;
100 si.si_signo = SIGSEGV;
101 si.si_errno = 0;
102 si.si_code = SI_KERNEL;
103 force_sig_info(SIGSEGV, &si, current);
105 DebugSig("finish\n");
106 return;
110 static inline void adjust_intr_counter(struct pt_regs *regs)
112 int nr = 0;
114 do {
115 if (from_trap(regs))
116 ++nr;
118 regs = regs->next;
119 } while (regs);
121 current->thread.intr_counter = nr;
124 static inline void
125 copy_jmp_regs(pt_regs_t *from, pt_regs_t *to)
127 to->stacks.sbr = from->stacks.sbr;
128 to->wd = from->wd;
129 to->stacks.usd_lo = from->stacks.usd_lo;
130 to->stacks.usd_hi = from->stacks.usd_hi;
131 to->stacks.psp_lo = from->stacks.psp_lo;
132 to->stacks.psp_hi = from->stacks.psp_hi;
133 to->stacks.pcsp_lo = from->stacks.pcsp_lo;
134 to->stacks.pcsp_hi = from->stacks.pcsp_hi;
135 to->stacks.u_stk_base_old = from->stacks.u_stk_base_old;
136 to->stacks.u_stk_top_old = from->stacks.u_stk_top_old;
137 to->stacks.u_stk_sz_old = from->stacks.u_stk_sz_old;
138 to->stacks.alt_stack_old = from->stacks.alt_stack_old;
139 to->stacks.valid = from->stacks.valid;
140 to->crs.cr0_lo = from->crs.cr0_lo;
141 to->crs.cr0_hi = from->crs.cr0_hi;
142 to->crs.cr1_lo = from->crs.cr1_lo;
143 to->crs.cr1_hi = from->crs.cr1_hi;
144 to->sys_rval = from->sys_rval;
static inline int
check_jump_info(struct jmp_info *jmp_info)
{
	/*
	 * Check that the jump target IP is a valid user address
	 * before changing the user hardware stacks.
	 */
	if ((current->thread.flags & E2K_FLAG_32BIT) &&
			jmp_info->ip > TASK32_SIZE)
		return -EFAULT;

	if (jmp_info->ip > TASK_SIZE)
		return -EFAULT;

	return 0;
}
164 static inline int
165 copy_jmpinfo_from_user(struct jmp_info *from, struct jmp_info *to)
167 int rval;
169 if (!access_ok(VERIFY_READ, from, sizeof(struct jmp_info)))
170 return -EFAULT;
172 rval = __get_user(to->sigmask, &from->sigmask);
173 rval |= __get_user(to->ip, &from->ip);
174 rval |= __get_user(to->cr1lo, &from->cr1lo);
175 rval |= __get_user(to->pcsplo, &from->pcsplo);
176 rval |= __get_user(to->pcsphi, &from->pcsphi);
177 rval |= __get_user(to->pcshtp, &from->pcshtp);
178 rval |= __get_user(to->br, &from->br);
179 rval |= __get_user(to->reserv1, &from->reserv1);
180 rval |= __get_user(to->reserv2, &from->reserv2);
182 if (rval)
183 return -EFAULT;
185 return check_jump_info(to);
188 void _NSIG_WORDS_is_unsupported_size(void)
190 sig_to_exit(-EINVAL);
193 static inline int setup_frame(struct sigcontext *sigc, siginfo_t *info,
194 struct extra_ucontext *extra, pt_regs_t *user_regs)
196 struct trap_pt_regs *trap = user_regs->trap;
197 int rval = 0;
198 int i;
199 char tag;
201 rval |= __put_user(AS_WORD(user_regs->crs.cr0_lo), &sigc->cr0_lo);
202 rval |= __put_user(AS_WORD(user_regs->crs.cr0_hi), &sigc->cr0_hi);
203 rval |= __put_user(AS_WORD(user_regs->crs.cr1_lo), &sigc->cr1_lo);
204 rval |= __put_user(AS_WORD(user_regs->crs.cr1_hi), &sigc->cr1_hi);
206 rval |= __put_user(user_regs->stacks.sbr, &sigc->sbr);
207 rval |= __put_user(AS_WORD(user_regs->stacks.usd_lo), &sigc->usd_lo);
208 rval |= __put_user(AS_WORD(user_regs->stacks.usd_hi), &sigc->usd_hi);
209 rval |= __put_user(AS_WORD(user_regs->stacks.psp_lo), &sigc->psp_lo);
210 rval |= __put_user(AS_WORD(user_regs->stacks.psp_hi), &sigc->psp_hi);
211 rval |= __put_user(AS_WORD(user_regs->stacks.pcsp_lo), &sigc->pcsp_lo);
212 rval |= __put_user(AS_WORD(user_regs->stacks.pcsp_hi), &sigc->pcsp_hi);
214 /* for binary compiler */
215 if (unlikely(TASK_IS_BINCO(current))) {
216 #ifdef CONFIG_SECONDARY_SPACE_SUPPORT
217 int mlt_num = user_regs->mlt_state.num;
218 #endif
220 rval |= __put_user(AS_WORD(current_thread_info()->upsr),
221 &sigc->upsr);
222 rval |= __put_user(user_regs->rpr_hi, &sigc->rpr_hi);
223 rval |= __put_user(user_regs->rpr_lo, &sigc->rpr_lo);
225 /* copy MLT */
226 #ifdef CONFIG_SECONDARY_SPACE_SUPPORT
227 if (mlt_num) {
228 if (copy_to_user((void *)sigc->mlt,
229 user_regs->mlt_state.mlt,
230 sizeof(e2k_mlt_entry_t) * mlt_num))
231 rval |= -EFAULT;
233 if (mlt_num < E2K_MAX_MLT_SIZE) {
234 if (clear_user((void *)&sigc->mlt[mlt_num * 3],
235 sizeof(e2k_mlt_entry_t) *
236 (E2K_MAX_MLT_SIZE - mlt_num)))
237 rval |= -EFAULT;
239 #endif
241 if (trap) {
242 for (i = 0; i < MAX_TC_SIZE; i++) {
243 rval |= __put_user(trap->tcellar[i].address,
244 &sigc->trap_cell_addr[i]);
245 rval |= __put_user(trap->tcellar[i].data,
246 &sigc->trap_cell_val[i]);
247 rval |= __put_user(trap->tcellar[i].condition.word,
248 &sigc->trap_cell_info[i]);
249 tag = E2K_LOAD_TAGD(&trap->tcellar[i].data);
250 rval |= __put_user(tag, &sigc->trap_cell_tag[i]);
252 /* TIR */
253 rval |= __put_user(trap->nr_TIRs, &sigc->nr_TIRs);
254 for (i = 0; i <= trap->nr_TIRs; i++) {
255 rval |= __put_user(trap->TIRs[i].TIR_hi.TIR_hi_reg,
256 &sigc->tir_hi[i]);
257 rval |= __put_user(trap->TIRs[i].TIR_lo.TIR_lo_reg,
258 &sigc->tir_lo[i]);
260 rval |= __put_user(trap->tc_count / 3, &extra->tc_count);
261 rval |= __put_user(trap->curr_cnt, &extra->curr_cnt);
262 } else {
263 rval |= __put_user(0, &sigc->nr_TIRs);
264 rval |= __put_user(0ULL, &sigc->tir_hi[0]);
265 rval |= __put_user(0ULL, &sigc->tir_lo[0]);
266 rval |= __put_user(0, &extra->tc_count);
267 rval |= __put_user(-1, &extra->curr_cnt);
269 rval |= __put_user(AW(user_regs->ctpr1), &extra->ctpr1);
270 rval |= __put_user(AW(user_regs->ctpr2), &extra->ctpr2);
271 rval |= __put_user(AW(user_regs->ctpr3), &extra->ctpr3);
272 /* size of saved extra elements */
273 rval |= __put_user(sizeof(struct extra_ucontext) - sizeof(int),
274 &extra->sizeof_extra_uc);
275 /* DAM */
276 SAVE_DAM(current_thread_info()->dam);
277 for (i = 0; i < DAM_ENTRIES_NUM; i++)
278 rval |= __put_user(current_thread_info()->dam[i],
279 &sigc->dam[i]);
281 return rval;
284 static inline int setup_ucontext32(struct ucontext_32 *uc, siginfo_t *info,
285 sigset32_t *oldset, e2k_usd_lo_t ss_usd_lo, pt_regs_t *user_regs)
287 int rval = 0;
289 rval |= __put_user(0, &uc->uc_flags);
290 rval |= __put_user(0, &uc->uc_link);
291 rval |= __put_user((int)current->sas_ss_sp, &uc->uc_stack.ss_sp);
292 rval |= __put_user(sas_ss_flags(AS_STRUCT(ss_usd_lo).base),
293 &uc->uc_stack.ss_flags);
294 rval |= __put_user((int)current->sas_ss_size, &uc->uc_stack.ss_size);
296 rval |= setup_frame(&uc->uc_mcontext, info, &uc->uc_extra, user_regs);
298 rval |= __copy_to_user(&uc->uc_sigmask, oldset, sizeof(*oldset));
300 return rval;
303 static inline int setup_ucontext(struct ucontext *uc, siginfo_t *info,
304 sigset_t *oldset, e2k_usd_lo_t ss_usd_lo, pt_regs_t *user_regs)
306 int rval = 0;
308 rval |= __put_user(0, &uc->uc_flags);
309 rval |= __put_user(0, &uc->uc_link);
310 rval |= __put_user((void *)current->sas_ss_sp, &uc->uc_stack.ss_sp);
311 rval |= __put_user(sas_ss_flags(AS_STRUCT(ss_usd_lo).base),
312 &uc->uc_stack.ss_flags);
313 rval |= __put_user(current->sas_ss_size, &uc->uc_stack.ss_size);
315 rval |= setup_frame(&uc->uc_mcontext, info, &uc->uc_extra, user_regs);
317 rval |= __copy_to_user(&uc->uc_sigmask, oldset, sizeof(*oldset));
319 return rval;
322 #ifdef CONFIG_PROTECTED_MODE
323 static inline int setup_prot_frame(struct sigcontext_prot *sigc,
324 siginfo_t *info, pt_regs_t *user_regs)
326 int rval;
328 rval = __put_user(AS_WORD(user_regs->crs.cr0_lo), &sigc->cr0_lo);
329 rval |= __put_user(AS_WORD(user_regs->crs.cr0_hi), &sigc->cr0_hi);
330 rval |= __put_user(AS_WORD(user_regs->crs.cr1_lo), &sigc->cr1_lo);
331 rval |= __put_user(AS_WORD(user_regs->crs.cr1_hi), &sigc->cr1_hi);
333 rval |= __put_user(user_regs->stacks.sbr, &sigc->sbr);
334 rval |= __put_user(AS_WORD(user_regs->stacks.usd_lo), &sigc->usd_lo);
335 rval |= __put_user(AS_WORD(user_regs->stacks.usd_hi), &sigc->usd_hi);
336 rval |= __put_user(AS_WORD(user_regs->stacks.psp_lo), &sigc->psp_lo);
337 rval |= __put_user(AS_WORD(user_regs->stacks.psp_hi), &sigc->psp_hi);
338 rval |= __put_user(AS_WORD(user_regs->stacks.pcsp_lo), &sigc->pcsp_lo);
339 rval |= __put_user(AS_WORD(user_regs->stacks.pcsp_hi), &sigc->pcsp_hi);
341 return rval;
344 static inline int setup_ucontext_prot(struct ucontext_prot *uc, siginfo_t *info,
345 sigset_t *oldset, e2k_usd_lo_t ss_usd_lo, pt_regs_t *user_regs)
347 struct thread_info *ti = current_thread_info();
348 int rval = 0;
350 rval |= __put_user(0, &uc->uc_flags);
351 rval |= __put_user(0, &AW(uc->uc_link).lo);
352 rval |= __put_user(0, &AW(uc->uc_link).hi);
354 AW(ti->ss_sp).lo = MAKE_SAP_LO(current->sas_ss_sp,
355 current->sas_ss_size, 0, 3);
356 AW(ti->ss_sp).hi = MAKE_SAP_HI(current->sas_ss_sp,
357 current->sas_ss_size, 0, 3);
358 rval |= copy_to_user_with_tags(&uc->uc_stack.ss_sp,
359 &ti->ss_sp, sizeof(ti->ss_sp));
360 rval |= __put_user(sas_ss_flags(AS_STRUCT(ss_usd_lo).base),
361 &uc->uc_stack.ss_flags);
362 rval |= __put_user(current->sas_ss_size, &uc->uc_stack.ss_size);
364 rval |= setup_prot_frame(&uc->uc_mcontext, info, user_regs);
366 rval |= __copy_to_user(&uc->uc_sigmask, oldset, sizeof(*oldset));
368 return rval;
370 #endif /* CONFIG_PROTECTED_MODE */
372 #define printk printk_fixed_args
373 static inline int setup_rt_frame(struct ucontext *uc, siginfo_t *u_si, siginfo_t *info,
374 sigset_t *oldset, e2k_usd_lo_t ss_usd_lo, pt_regs_t *user_regs)
376 int rval = 0;
378 if (current->thread.flags & E2K_FLAG_32BIT) {
379 #ifdef CONFIG_PROTECTED_MODE
380 if (!(current->thread.flags & E2K_FLAG_PROTECTED_MODE)) {
381 #endif /* CONFIG_PROTECTED_MODE */
382 if (current->thread.flags & E2K_FLAG_64BIT_BINCO)
383 rval = copy_siginfo_to_user(u_si, info);
384 else
385 rval = copy_siginfo_to_user32(
386 (compat_siginfo_t *) u_si, info);
387 if (rval) {
388 DebugHS("bad copy_siginfo_to_user32 from 0x%p to 0x%p rval %d\n",
389 info, u_si, rval);
390 return rval;
392 rval = setup_ucontext32((struct ucontext_32 *)uc, info,
393 (sigset32_t *)oldset, ss_usd_lo,
394 user_regs);
395 #ifdef CONFIG_PROTECTED_MODE
396 } else {
397 rval = copy_siginfo_to_user(u_si, info);
398 if (rval) {
399 DebugHS("bad copy_siginfo_"
400 "to_user from 0x%p to 0x%p rval %d\n",
401 info, u_si, rval);
402 return rval;
404 rval = setup_ucontext_prot((struct ucontext_prot *) uc,
405 info, oldset, ss_usd_lo, user_regs);
407 #endif /* CONFIG_PROTECTED_MODE */
408 } else {
409 rval = copy_siginfo_to_user(u_si, info);
410 if (rval) {
411 DebugHS("bad copy_siginfo_to_user() "
412 "from 0x%p to 0x%p rval %d\n",
413 info, u_si, rval);
414 return rval;
416 rval = setup_ucontext(uc, info, oldset, ss_usd_lo, user_regs);
419 return rval;
421 #undef printk
423 static int sigset_restore(rt_sigframe_t *user_sigframe, pt_regs_t *regs)
425 struct k_sigaction *ka = &regs->ka;
426 sigset_t *set_ptr;
427 sigset_t set;
429 if (ka->sa.sa_flags & SA_SIGINFO) {
430 if (!(current->thread.flags & E2K_FLAG_32BIT)) {
431 set_ptr = &user_sigframe->uc.uc_sigmask;
432 #ifdef CONFIG_PROTECTED_MODE
433 } else if (current->thread.flags & E2K_FLAG_PROTECTED_MODE) {
434 struct ucontext_prot *uc;
436 uc = (struct ucontext_prot *) &user_sigframe->uc;
437 set_ptr = &uc->uc_sigmask;
438 #endif
439 } else {
440 struct ucontext_32 *uc;
442 uc = (struct ucontext_32 *) &user_sigframe->uc;
443 set_ptr = (sigset_t *) &uc->uc_sigmask;
	} else {
		set_ptr = &user_sigframe->saved_set;
	}

	if (copy_from_user(&set, set_ptr, sizeof(set))) {
		force_sig(SIGSEGV, current);
		return -EFAULT;
	}

	/*
	 * Everything below follows sys_rt_sigreturn() on i386.
	 */
	sigdelsetmask(&set, ~_BLOCKABLE);
458 spin_lock_irq(&current->sighand->siglock);
459 current->blocked = set;
460 recalc_sigpending();
461 spin_unlock_irq(&current->sighand->siglock);
462 DebugSig("signal pending is %d\n",
463 signal_pending(current));
465 return 0;
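
/*
 * Worked example for the mask restore above (a sketch; relies on the classic
 * definition _BLOCKABLE == ~(sigmask(SIGKILL) | sigmask(SIGSTOP))): whatever
 * mask the signal frame carries, SIGKILL and SIGSTOP are always stripped
 * before it becomes current->blocked, i.e. the call is equivalent to
 *
 *	sigdelsetmask(&set, sigmask(SIGKILL) | sigmask(SIGSTOP));
 */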
469 #define synchronize_user_stack() {} /* Nothing to do. RF is already flushed. */
470 #define save_and_clear_fpu() {} /* NEEDSWORK */
473 #define printk printk_fixed_args
474 #define panic panic_fixed_args
475 noinline notrace __protect __interrupt
476 void go2user(long fn)
478 register e2k_cr1_lo_t cr1_lo;
479 register e2k_cr0_hi_t cr0_hi;
480 register e2k_cuir_t cuir;
481 register e2k_psr_t psr;
483 // DebugSig("start fn is 0x%lx\n", fn);
484 AS_WORD(cr1_lo) = E2K_GET_DSREG_NV(cr1.lo);
485 AS_WORD(cr0_hi) = E2K_GET_DSREG_NV(cr0.hi);
487 AS_WORD(psr) = 0;
488 AS_STRUCT(psr).sge = 1;
489 AS_STRUCT(psr).ie = 1; /* sti(); */
490 AS_STRUCT(psr).nmie = 1; /* nm sti(); */
491 AS_STRUCT(psr).pm = 0; /* user mode */
492 AS_STRUCT(cr1_lo).psr = AS_WORD(psr);
493 AS_STRUCT(cr0_hi).ip = fn >> 3; /* start user IP */
495 AS_WORD(cuir) = 0; // AS_STRUCT(cuir).checkup = 0 too
496 #if 0
497 if ((current->thread.flags & E2K_FLAG_32BIT)) {
498 AS_STRUCT(cuir).index = USER_CODES_32_INDEX;
499 } else {
500 AS_STRUCT(cuir).index = USER_CODES_START_INDEX;
502 #endif
503 AS_STRUCT(cr1_lo).cuir = AS_WORD(cuir);
505 if (!psr_irqs_disabled())
506 panic_fixed_args("go2user: under sti\n");
507 E2K_SET_DSREG_NV_NOIRQ(cr1.lo, AS_WORD(cr1_lo));
508 E2K_SET_DSREG_NV_NOIRQ(cr0.hi, AS_WORD(cr0_hi));
509 // DebugHS("cr1.lo 0x%lx cr0.hi 0x%lx\n",
510 // AS_WORD(cr1_lo), AS_WORD(cr0_hi));
512 // DebugHS("usd.hi %lx usd.lo %lx\n",
513 // READ_USD_HI_REG_VALUE(), READ_USD_LO_REG_VALUE());
514 // DebugHS("finish\n");
516 #ifdef CONFIG_CLI_CHECK_TIME
517 sti_return();
518 #endif
520 #ifdef CONFIG_PROTECTED_MODE
521 if ((current->thread.flags & E2K_FLAG_PROTECTED_MODE)) {
522 e2k_usd_lo_t usd_lo;
523 usd_lo = READ_USD_LO_REG();
524 usd_lo.USD_lo_p = 1;
525 usd_lo.USD_lo_base = (usd_lo.USD_lo_base & 0xFFFFFFFF) |
526 (current_thread_info()->pusd_pil + 0x100000000);
527 // correct usd as if we are entered by call
528 // hope we don't overflow psl field
529 WRITE_USD_LO_REG(usd_lo);
530 ENABLE_US_CLW();
532 #endif
	/*
	 * Set the UPSR register to its initial state for the user process.
	 */
	WRITE_UPSR_REG(current_thread_info()->upsr);
538 /* Restore user global registers. This is needed only for binco,
539 * since for e2k applications g16-g31 registers are actually local. */
540 E2K_RESTORE_GREG_IN_TRAP(current_thread_info()->gbase,
541 current_thread_info()->gext, 16, 17, 18, 19);
543 /* Prevent kernel information leakage */
544 #if E2K_MAXSR != 112
545 # error Must clear all registers here
546 #endif
547 #ifndef CONFIG_E2S_CPU_RF_BUG
548 E2K_CLEAR_RF_112();
549 #endif
551 #undef printk
552 #undef panic
554 __protect
555 extern void prot_as_sa_handler(long r0, long r1, int sig, long fn,
556 long r4, long r5, long r6, long r7,
557 e2k_sbr_t sbr, e2k_usd_lo_t usd_lo, e2k_usd_hi_t usd_hi);
558 #ifdef CONFIG_PROTECTED_MODE
559 __protect
560 extern void as_sa_handler_not_protect(int sig, siginfo_t *sip,
561 struct ucontext *env, long fn,
562 e2k_sbr_t sbr, e2k_usd_lo_t usd_lo, e2k_usd_hi_t usd_hi);
563 #endif
/*
 * as_sa_handler() invokes a hook to enter the signal handler
 * after return from go2user().
 *
 * The hook runs in user mode (pm = 0) but its cr0.lo will
 * have pm set to 1 (see go2user() above).
 *
 * Remember that we are working on user resources here.
 */
notrace noinline void __protect
575 as_sa_handler(int sig, siginfo_t *sip, struct ucontext *env, long fn,
576 e2k_sbr_t sbr, e2k_usd_lo_t usd_lo, e2k_usd_hi_t usd_hi)
578 #ifdef CONFIG_PROTECTED_MODE
579 struct thread_info *ti;
580 u64 m[8];
581 u64 a = (u64)m;
582 #endif
584 #ifdef CONFIG_PROTECTED_MODE
585 if (!(current->thread.flags & E2K_FLAG_PROTECTED_MODE)) {
586 as_sa_handler_not_protect(sig, sip, env, fn,
587 sbr, usd_lo, usd_hi);
588 } else {
589 a = (a + 15) & ~0x000000000000000FUL;
590 prot_as_sa_handler(
591 MAKE_AP_LO((unsigned long)a, 8*8, 0UL, RW_ENABLE),
592 MAKE_AP_HI((unsigned long)a, 8*8, 0UL, RW_ENABLE),
593 sig, fn,
594 MAKE_AP_LO((unsigned long)sip, sizeof (siginfo_t),
595 0UL, RW_ENABLE),
596 MAKE_AP_HI((unsigned long)sip, sizeof (siginfo_t),
597 0UL, RW_ENABLE),
598 MAKE_AP_LO((unsigned long)env, sizeof ( *env),
599 0UL, RW_ENABLE),
600 MAKE_AP_HI((unsigned long)env, sizeof ( *env),
601 0UL, RW_ENABLE),
602 sbr, usd_lo, usd_hi);
604 #else
605 as_sa_handler_not_protect(sig, sip, env, fn,
606 sbr, usd_lo, usd_hi);
607 #endif /* CONFIG_PROTECTED_MODE */
609 #ifdef CONFIG_PROTECTED_MODE
610 ti = (struct thread_info *) E2K_GET_DSREG_NV(osr0);
611 if (ti->task->thread.flags & E2K_FLAG_PROTECTED_MODE)
612 DISABLE_US_CLW();
613 #endif
616 #ifdef CONFIG_KERNEL_CODE_CONTEXT
617 extern notrace noinline __interrupt void __protect
618 start_handler_sequel(int sig, siginfo_t *sip, struct ucontext *env, long fn,
619 e2k_sbr_t sbr, e2k_usd_lo_t usd_lo, e2k_usd_hi_t usd_hi,
620 e2k_cutd_t u_cutd);
622 notrace __interrupt
623 static void hwbug_zero_cui_workaround(int sig, siginfo_t *u_si,
624 struct ucontext *uc, long fn, u64 ss_sbr,
625 u64 ss_usd_lo, u64 ss_usd_hi)
627 e2k_cutd_t k_cutd;
628 u64 u_cutd;
629 u64 k_usd_lo;
630 void (*start_handler_sequel_func)(
631 int sig, siginfo_t *sip, struct ucontext *env, long fn,
632 u64 sbr, u64 usd_lo, u64 usd_hi, u64 u_cutd);
634 u_cutd = E2K_GET_DSREG_NV(cutd);
635 k_usd_lo = E2K_GET_DSREG_NV(usd.lo);
636 start_handler_sequel_func = (typeof(start_handler_sequel_func))
637 AS_WORD(MAKE_PRIV_PL((e2k_addr_t)&start_handler_sequel));
638 E2K_PUTTAGD(start_handler_sequel_func, ETAGPLD);
	/*
	 * Cannot do
	 *
	 *	AS(k_usd_lo).p = 1;
	 *
	 * because of LCC problems with __check_stack.
	 */
	k_usd_lo |= 1UL << 58;
647 /* Since USD.p == 1, the next call will increase USD.psl.
648 * But USD is essentially non-protected so we do not want that. */
649 k_usd_lo -= 0x100000000;
650 WRITE_USD_LO_REG_VALUE(k_usd_lo);
651 /* User CUT can be unavailable right now (swapped out),
652 * so set kernel CUT before switching to non-zero CUI
653 * (i.e. before accessing CUT). */
654 AW(k_cutd) = 0;
655 AS(k_cutd).base = (unsigned long) kernel_CUT;
656 WRITE_CUTD_REG(k_cutd);
657 start_handler_sequel_func(sig, u_si, uc, fn,
658 ss_sbr, ss_usd_lo, ss_usd_hi, u_cutd);
660 #endif /* CONFIG_KERNEL_CODE_CONTEXT */
662 #define printk printk_fixed_args
663 #define panic panic_fixed_args
664 static __interrupt notrace int
665 handle_signal(unsigned long sig, sigset_t *oldset, struct pt_regs *regs)
667 struct trap_pt_regs *trap = regs->trap;
668 register thread_info_t *thread_info = current_thread_info();
669 register struct k_sigaction *ka = &regs->ka;
670 register siginfo_t *info = &regs->info;
671 register pt_regs_t *tmp;
672 register rt_sigframe_t *rt_sigframe;
673 pt_regs_t *env = NULL;
674 struct ucontext *uc = NULL;
675 siginfo_t *u_si = NULL;
676 u64 ss_sbr, ss_sp, ss_stk_base, ss_stk_size;
677 u64 sigframe_size, fn;
678 e2k_usd_lo_t ss_usd_lo;
679 e2k_usd_hi_t ss_usd_hi;
680 int rval = 0, err = 0, nr_signals;
682 fn = (u64)(ka->sa.sa_handler);
684 DebugHS("start addr %lx regs %p fn %lx\n",
685 (trap) ? trap->tcellar[trap->curr_cnt].address : 0UL, regs, fn);
687 if (ka->sa.sa_flags & SA_ONESHOT) {
688 DebugHS("ka->sa.sa_handler = SIG_DFL\n");
689 ka->sa.sa_handler = SIG_DFL;
	}

	/*
	 * We use a stack frame to be able to pass siginfo_t to the handler.
	 * It may not be strictly needed now, but additional info may be
	 * needed later.  pt_regs are needed at all times.
	 * ss_usd is the signal stack USD; it can point either to the
	 * current user stack or to the sigaltstack.
	 */
701 BUG_ON(!user_mode(regs));
703 nr_signals = 0;
704 for (tmp = regs->next; tmp != NULL; tmp = tmp->next)
705 ++nr_signals;
707 if (unlikely(nr_signals >= MAX_HANDLED_SIGS)) {
708 pr_info_ratelimited("[%d] %s: maximum signal recursion reached\n",
709 current->pid, current->comm);
710 do_exit(SIGKILL);
713 ss_sbr = regs->stacks.sbr;
714 ss_usd_lo = regs->stacks.usd_lo;
715 ss_usd_hi = regs->stacks.usd_hi;
716 ss_sp = AS_STRUCT(ss_usd_lo).base;
718 SAVE_USER_REGS_FROM_THREAD_INFO(thread_info, regs);
719 if (ss_usd_lo.USD_lo_p) {
720 current_thread_info()->pusd_pil = ss_sp & 0xFFF00000000;
721 DebugHS("%s: saved pil = 0x%lx\n", __FUNCTION__, ss_sp & 0xFFF00000000);
722 ss_sp = (ss_sp & 0xFFFFFFFF) + (ss_sbr & 0xFFF00000000);
724 ss_stk_size = AS_STRUCT(ss_usd_hi).size;
725 ss_stk_base = ss_sp - ss_stk_size;
727 DebugHS("ss_sp 0x%lx size 0x%lx ss_stk_base 0x%lx\n",
728 ss_sp, ss_stk_size, ss_stk_base);
729 DebugHS("usd.hi %lx usd.lo %lx\n",
730 READ_USD_HI_REG_VALUE(), READ_USD_LO_REG_VALUE());
731 sigframe_size = round_up(sizeof(rt_sigframe_t) + _PSIG_SIZE_,
732 E2K_ALIGN_USTACK);
	/*
	 * This is the X/Open sanctioned signal stack switching
	 * to the alternative stack.
	 */
737 if (ka->sa.sa_flags & SA_ONSTACK) {
738 bool use_alt_stack = false;
740 if (!on_sig_stack(ss_sp)) {
741 u64 alt_ss_stk_base = round_up(current->sas_ss_sp,
742 E2K_ALIGN_USTACK);
743 u64 alt_ss_stk_size = round_down(current->sas_ss_size +
744 current->sas_ss_sp - alt_ss_stk_base,
745 E2K_ALIGN_USTACK);
747 DebugHS("SA_ONSTACK ss 0x%lx sz 0x%lx, after aligning ss 0x%lx sz 0x%lx\n",
748 current->sas_ss_sp, current->sas_ss_size,
749 alt_ss_stk_base, alt_ss_stk_size);
750 if (alt_ss_stk_size >= sigframe_size) {
751 ss_stk_base = alt_ss_stk_base;
752 ss_stk_size = alt_ss_stk_size;
753 ss_sp = ss_stk_base + ss_stk_size;
754 use_alt_stack = true;
			} else
				DebugHS("alternative stack size 0x%lx < 0x%lx needed to pass signal info and context. Using standard stack.\n",
					ss_stk_size, sigframe_size);
760 if (use_alt_stack) {
761 AS_STRUCT(ss_usd_lo).base = ss_stk_base;
762 STORE_USER_REGS_TO_THREAD_INFO(thread_info,
763 ss_stk_base,
764 ss_stk_base + ss_stk_size,
765 ss_stk_size
770 while (ss_stk_size < sigframe_size) {
771 DebugHS("user stack size 0x%lx < 0x%lx needed to pass signal info and context\n",
772 ss_stk_size, sigframe_size);
773 if (expand_user_data_stack(regs, current, false)) {
774 pr_info_ratelimited("[%d] %s: user data stack overflow\n",
775 current->pid, current->comm);
776 do_exit(SIGKILL);
778 ss_sbr = regs->stacks.sbr;
779 ss_usd_lo = regs->stacks.usd_lo;
780 ss_usd_hi = regs->stacks.usd_hi;
781 ss_sp = AS_STRUCT(ss_usd_lo).base;
782 if (ss_usd_lo.USD_lo_p) {
783 ss_sp = (ss_sp &0xFFFFFFFF) + (ss_sbr & 0xFFF00000000);
786 ss_stk_size = AS_STRUCT(ss_usd_hi).size;
787 ss_stk_base = ss_sp - ss_stk_size;
789 DebugHS("expanded stack: ss_sp 0x%lx size 0x%lx ss_stk_base 0x%lx\n",
790 ss_sp, ss_stk_size, ss_stk_base);
793 rt_sigframe = (rt_sigframe_t *) (ss_sp - sizeof(rt_sigframe_t));
795 ss_sp -= sigframe_size;
797 DebugHS("rt_sigframe %p\n", rt_sigframe);
799 if (!access_ok(VERIFY_WRITE, rt_sigframe, sizeof(rt_sigframe_t))) {
800 DebugHS("access failed to user stack frame base 0x%lx size 0x%lx sp 0x%lx\n",
801 ss_stk_base, ss_stk_size, ss_sp);
802 goto give_sigsegv;
804 u_si = &(rt_sigframe->info);
805 uc = &(rt_sigframe->uc);
807 if (__copy_to_user(&rt_sigframe->saved_set, oldset, sizeof(sigset_t)))
808 goto give_sigsegv;
810 if (TASK_IS_BINCO(current))
811 SAVE_RPR_REGS(regs);
813 if (ka->sa.sa_flags & SA_SIGINFO) {
814 err = setup_rt_frame(uc, u_si, info, oldset,
815 ss_usd_lo, regs);
816 if (err)
817 goto give_sigsegv;
818 } else
820 err = setup_frame((struct sigcontext *)u_si, info,
821 &uc->uc_extra, regs);
822 if (err)
823 goto give_sigsegv;
826 /* To do new usd */
827 #ifdef CONFIG_PROTECTED_MODE
828 if (current->thread.flags & E2K_FLAG_PROTECTED_MODE) {
829 /* let's work with unprotected usd */
830 /* change it in last moment */
831 ss_usd_lo.USD_lo_p = 0;
833 #endif
834 AS_STRUCT(ss_usd_lo).base = ss_sp;
835 ss_stk_size = (ss_sp - ss_stk_base);
836 AS_STRUCT(ss_usd_hi).size = ss_stk_size;
837 DebugHS("ss_stk_size %lx\n", ss_stk_size);
839 DebugHS("ss_usd_lo.base %lx ss_usd_hi.size %lx\n",
840 AS_STRUCT(ss_usd_lo).base, AS_STRUCT(ss_usd_hi).size);
841 DebugHS("ss_usd_lo %lx ss_usd_hi %lx\n",
842 (u64)AS_WORD(ss_usd_lo), (u64)AS_WORD(ss_usd_hi));
	/*
	 * Switch to the user hardware stack and user C stack, but keep
	 * working in kernel mode using window registers only, until we
	 * return to the user handler.
	 */
850 DebugHS("handle_signal() will start handler() 0x%lx for sig #%ld sig_info %p env %p\n",
851 fn, sig, u_si, env);
	/*
	 * Do this after the point of no return and after we have
	 * used "oldset" (which may point to current->blocked).
	 */
857 spin_lock_irq(&current->sighand->siglock);
858 sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
859 if (!(ka->sa.sa_flags & SA_NODEFER)) {
860 DebugHS("! SA_NODEFER\n");
861 sigaddset(&current->blocked, sig);
863 recalc_sigpending();
864 spin_unlock_irq(&current->sighand->siglock);
866 /* Critical section here is short and tracing it is error-prone
867 * since we switch stacks */
868 raw_all_irq_disable();
	/*
	 * Notify the kernel that we are ready to call the signal handler.
	 * Do this with interrupts disabled to avoid races with interrupt
	 * handlers adding work for us to do (interrupts will be re-enabled
	 * only after switching to user).
	 */
876 while (test_thread_flag(TIF_NOTIFY_RESUME)) {
877 raw_all_irq_enable();
878 clear_thread_flag(TIF_NOTIFY_RESUME);
879 /* We do not have pt_regs that correspond to
880 * the handler context so just pass NULL. */
881 do_notify_resume(NULL);
882 raw_all_irq_disable();
	}

	/*
	 * The user function will be executed under PSR interrupt control,
	 * so the kernel must hand interrupt mask control back to the PSR
	 * register (if needed) before the as_sa_handler() call, so that this
	 * PSR is saved into the CR register, from which it will be restored
	 * on return from the user function.  Otherwise it could be saved
	 * with control under UPSR.  But UPSR is a global register whose
	 * state can be modified by the user, and the kernel would inherit
	 * the user's UPSR, where interrupts are enabled while they must be
	 * disabled at this point (see above).
	 */
	RETURN_IRQ_TO_PSR();
898 if (from_trap(regs))
899 exception_exit(regs->trap->prev_state);
900 else
901 user_enter();
	/*
	 * Record the current state of the kernel stacks as entry points for
	 * switching back to the kernel stack, so that recursive traps are
	 * possible while the user handler runs.
	 */
907 AW(thread_info->k_usd_hi) = READ_USD_HI_REG_VALUE();
908 AW(thread_info->k_usd_lo) = READ_USD_LO_REG_VALUE();
909 CHECK_TI_K_USD_SIZE(thread_info);
911 #ifdef CONFIG_KERNEL_CODE_CONTEXT
912 hwbug_zero_cui_workaround(sig, u_si, uc, fn,
913 ss_sbr, AW(ss_usd_lo), AW(ss_usd_hi));
914 #else
915 as_sa_handler(sig, u_si, uc, fn, ss_sbr, ss_usd_lo, ss_usd_hi);
916 #endif /* CONFIG_KERNEL_CODE_CONTEXT */
919 * Save new values of g16-g19 and set current pointers.
920 * This must be done at the same level as restoring %sbr,
921 * otherwise user_trap_handler() will save kernel values
922 * of global registers into thread_info.
924 thread_info = (struct thread_info *) E2K_GET_DSREG_NV(osr0);
925 E2K_SAVE_GREG(thread_info->gbase, thread_info->gext,
926 thread_info->tag, 16, 17);
927 E2K_SAVE_GREG(&thread_info->gbase[2], &thread_info->gext[2],
928 &thread_info->tag[2], 18, 19);
929 E2K_SET_DGREG_NV(16, thread_info);
930 E2K_SET_DGREG_NV(17, thread_info->task);
931 E2K_SET_DGREG_NV(19, (u64) thread_info->cpu);
932 set_my_cpu_offset(per_cpu_offset(raw_smp_processor_id()));
	/*
	 * The user process can fork() in the signal handler, so we can
	 * return here from the child on different kernel stacks.
	 * Restore all address info related to the kernel stacks and the
	 * task & thread_info structures.
	 */
940 regs = thread_info->pt_regs;
941 BUG_ON(!user_mode(regs));
942 CHECK_CT_INTERRUPTED(regs);
	/*
	 * We return here from user mode with interrupts disabled (cli) and
	 * under PSR control.  Restore the kernel UPSR register state and
	 * switch back to UPSR interrupt control (if needed).
	 */
949 DO_SAVE_UPSR_REG(current_thread_info()->upsr);
951 SET_KERNEL_UPSR_WITH_DISABLED_NMI(0);
	/*
	 * We are still working on the user hardware stack,
	 * but we must get back onto kernel resources.
	 */
957 #ifdef CONFIG_CLI_CHECK_TIME
958 check_cli();
959 #endif
	/*
	 * Return to the kernel stacks.
	 * If the hardware stacks are shared between user and kernel, we do
	 * not return to the user stacks and continue on the same stacks.
	 */
966 WRITE_SBR_REG_VALUE(thread_info->k_stk_base + thread_info->k_stk_sz);
967 WRITE_USD_REG_VALUE(AW(thread_info->k_usd_hi),
968 AW(thread_info->k_usd_lo));
970 CHECK_TI_K_USD_SIZE(thread_info);
972 if (rval) {
973 E2K_SET_USER_STACK(1);
974 panic("handle_signal(): could not find user pt_regs structure after return from signal handler\n");
977 if (from_trap(regs))
978 regs->trap->prev_state = exception_enter();
979 else
980 user_exit();
982 /* Critical section here is short and tracing it is error-prone
983 * since we switch stacks */
984 raw_all_irq_enable();
	/*
	 * rt_sigreturn() is a system call: we take a trap and call
	 * ttable_entry().  We will return to user space according to
	 * regs (the argument of rt_sigreturn).
	 */
990 if (DEBUG_HS_MODE && signal_pending(current)) {
991 E2K_SET_USER_STACK(1);
992 DebugHS("we get sig again\n");
994 if (DEBUG_HS_MODE) {
995 e2k_psp_hi_t psp_hi;
996 e2k_pcsp_hi_t pcsp_hi;
997 e2k_pshtp_t pshtp;
998 e2k_pcshtp_t pcshtp;
1000 E2K_SET_USER_STACK(1);
1001 raw_all_irq_disable();
1002 psp_hi = READ_PSP_HI_REG();
1003 pshtp = READ_PSHTP_REG();
1004 pcsp_hi = READ_PCSP_HI_REG();
1005 pcshtp = READ_PCSHTP_REG();
1006 raw_all_irq_enable();
1007 DebugHS("after user handler PS: ind 0x%lx, pshtp.ind 0x%lx, size 0x%lx, k_sz 0x%lx\n",
1008 psp_hi.PSP_hi_ind, GET_PSHTP_INDEX(pshtp),
1009 psp_hi.PSP_hi_size, KERNEL_P_STACK_SIZE);
1010 DebugHS("after user handler PCS: ind 0x%lx, pcshtp.ind 0x%lx, size 0x%lx, k_sz 0x%lx\n",
1011 pcsp_hi.PCSP_hi_ind, PCSHTP_SIGN_EXTEND(pcshtp),
1012 pcsp_hi.PCSP_hi_size, KERNEL_PC_STACK_SIZE);
1013 DebugHS("will start rt_sigreturn() with regs 0x%p\n",
1014 regs);
1017 RESTORE_USER_REGS_TO_THREAD_INFO(thread_info, regs);
1018 do_sigreturn(regs, sig, rt_sigframe);
1020 return 0;
1022 give_sigsegv:
1023 force_sigsegv(sig, current);
1025 DebugHS("force_sig_info return\n");
1027 return -EFAULT;
1029 #undef printk
1030 #undef panic
1033 * Note that 'init' is a special process: it doesn't get signals it doesn't
1034 * want to handle. Thus you cannot kill init even with a SIGKILL even by
1035 * mistake.
1037 #define printk printk_fixed_args
1038 #define panic panic_fixed_args
1039 int __interrupt do_signal(struct pt_regs *regs)
1041 siginfo_t *info = &regs->info;
1042 struct k_sigaction *ka = &regs->ka;
1043 int signr;
1044 long errno = regs->sys_rval;
1045 sigset_t *oldset;
1047 BUG_ON(sge_checking_enabled());
1049 if (TASK_IS_BINCO(current))
1050 clear_delayed_signal_handling(current_thread_info());
1052 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
1053 oldset = &current->saved_sigmask;
1054 else
1055 oldset = &current->blocked;
1057 DebugHS("start pid %d\n", current->pid);
1059 signr = get_signal_to_deliver(info, ka, regs, NULL);
1060 DebugHS("signr %d regs->sys_num == %ld\n",
1061 signr, regs->sys_num);
1062 if (signr > 0) {
1063 DebugHS("signr == %d ka is 0x%p\n", signr, ka);
1065 regs->restart_needed = 0;
1067 /* Are we from a system call? */
1068 #if defined(CONFIG_SECONDARY_SPACE_SUPPORT)
1069 if (regs->sys_num >= 0 &&
1070 !current_thread_info()->sc_restart_ignore) {
1071 #else
1072 if (regs->sys_num >= 0) {
1073 #endif
1074 DebugHS("sys_num = %ld signr = %d errno %ld\n",
1075 regs->sys_num, signr, errno);
			/*
			 * If so, check for system call restarting.
			 * This is done (almost) as on i386, where the
			 * -ERESTARTNOINTR case does "regs->eip -= 2;" so the
			 * syscall is restarted on return.  Here we set
			 * restart_needed = 1 and do_signal() returns -1.
			 */
1084 switch (errno) {
1085 case -ERESTART_RESTARTBLOCK:
1086 case -ERESTARTNOHAND:
1087 regs->sys_rval = -EINTR;
1088 break;
1089 case -ERESTARTSYS:
1090 if (!((*ka).sa.sa_flags & SA_RESTART)) {
1091 regs->sys_rval = -EINTR;
1092 break;
1094 /* fallthrough */
1095 case -ERESTARTNOINTR:
1096 regs->restart_needed = 1;
1098 DebugHS("sys_rval = %ld restart = %ld\n",
1099 regs->sys_rval, regs->restart_needed);
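
			/*
			 * Hedged user-space illustration of the cases above:
			 * a read() interrupted by SIGALRM whose handler was
			 * installed without SA_RESTART fails with EINTR,
			 * while with SA_RESTART the restart_needed path
			 * replays the system call transparently:
			 *
			 *	struct sigaction sa = { .sa_handler = h };
			 *	sigaction(SIGALRM, &sa, NULL);
			 *	alarm(1);
			 *	n = read(fd, buf, len);
			 *	// n == -1, errno == EINTR here;
			 *	// with sa.sa_flags = SA_RESTART the read()
			 *	// would simply be restarted
			 */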
1102 /* Whee! Actually deliver the signal. */
1105 * Before calling signal handler we save global registers and
1106 * restore them after it returns. This is to support compiler
1107 * optimization which uses %g16-%g31 global registers as local
1108 * instead of using usual registers.
1110 #ifdef CONFIG_GREGS_CONTEXT
1111 if (!TASK_IS_BINCO(current)) {
1112 E2K_MOVE_TAGGED_QWORD(&current_thread_info()->gbase[0],
1113 &regs->gregs.gbase[0]);
1114 E2K_MOVE_TAGGED_QWORD(&current_thread_info()->gbase[2],
1115 &regs->gregs.gbase[2]);
1116 regs->gregs.gext[0] = current_thread_info()->gext[0];
1117 regs->gregs.gext[1] = current_thread_info()->gext[1];
1118 regs->gregs.gext[2] = current_thread_info()->gext[2];
1119 regs->gregs.gext[3] = current_thread_info()->gext[3];
1120 SAVE_GLOBAL_REGISTERS_SIGNAL(&regs->gregs);
1122 #endif
1124 if (handle_signal(signr, oldset, regs) == 0) {
			/*
			 * A signal was successfully delivered; the saved
			 * sigmask has been stored in the signal frame
			 * and restored by do_sigreturn(), so we can simply
			 * clear the TS_RESTORE_SIGMASK flag.
			 */
			current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
		}

		/*
		 * The user process can fork() in the signal handler, so we
		 * can return here from the child on different kernel stacks.
		 * Restore all address info related to the kernel stacks and
		 * the task & thread_info structures.
		 */
1140 regs = current_thread_info()->pt_regs;
1142 #ifdef CONFIG_GREGS_CONTEXT
1143 if (!TASK_IS_BINCO(current)) {
1144 LOAD_GLOBAL_REGISTERS_SIGNAL(&regs->gregs);
1145 E2K_MOVE_TAGGED_QWORD(&regs->gregs.gbase[0],
1146 &current_thread_info()->gbase[0]);
1147 E2K_MOVE_TAGGED_QWORD(&regs->gregs.gbase[2],
1148 &current_thread_info()->gbase[2]);
1149 current_thread_info()->gext[0] = regs->gregs.gext[0];
1150 current_thread_info()->gext[1] = regs->gregs.gext[1];
1151 current_thread_info()->gext[2] = regs->gregs.gext[2];
1152 current_thread_info()->gext[3] = regs->gregs.gext[3];
1154 #endif
1156 if (regs->restart_needed)
1157 return -1;
1158 return 1;
1161 #ifdef CONFIG_DEBUG_INIT
	/*
	 * Kernel debugging only: some tests may be launched as the init
	 * process.
	 */
	if (current->pid <= 1)
1166 panic("do_signal: signal on Init so will be recursive traps or signals\n");
1167 #endif /* CONFIG_DEBUG_INIT */
1169 /* Did we come from a system call? */
1170 #if defined(CONFIG_SECONDARY_SPACE_SUPPORT)
1171 if (regs->sys_num >= 0 &&
1172 !current_thread_info()->sc_restart_ignore) {
1173 #else
1174 if (regs->sys_num >= 0) {
1175 #endif
1176 /* Restart the system call - no handlers present */
1177 if (errno == -ERESTARTNOHAND ||
1178 errno == -ERESTARTSYS ||
1179 errno == -ERESTARTNOINTR) {
1180 DebugSig("ret -1 no handlers pid %d and replay syscall\n",
1181 current->pid);
1182 return -1;
1184 if (errno == -ERESTART_RESTARTBLOCK){
1185 DebugSig("ret -1 no handlers pid %d and force the restart syscall\n",
1186 current->pid);
1187 return -ERESTART_RESTARTBLOCK;
1192 * If there's no signal to deliver, we just put the saved sigmask back.
1194 if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
1195 current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
1196 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
1199 DebugSig("exited with 0\n");
1201 return 0;
1203 #undef printk
1204 #undef panic
1206 static int
1207 do_sigreturn(pt_regs_t *regs, unsigned long signo, rt_sigframe_t *user_sigframe)
1209 struct k_sigaction *ka = &regs->ka;
1210 struct trap_pt_regs *trap = regs->trap;
1211 int ret;
1213 if ((ret = sigset_restore(user_sigframe, regs)))
1214 return ret;
1216 if (ka->sa.sa_flags & SA_SIGINFO) {
1217 unsigned long long *u_cr0_hi;
1218 e2k_cr0_hi_t cr0_hi;
1221 * User signal handler can change its return IP.
1222 * Update kernel pt_regs struct with a new value.
1224 if (current->thread.flags & E2K_FLAG_32BIT) {
1225 #ifdef CONFIG_PROTECTED_MODE
1226 if (!(current->thread.flags & E2K_FLAG_PROTECTED_MODE))
1227 #endif
1228 u_cr0_hi = &(((struct ucontext_32 *)
1229 (&user_sigframe->uc))->uc_mcontext.cr0_hi);
1230 #ifdef CONFIG_PROTECTED_MODE
1231 else
1232 u_cr0_hi = &(((struct ucontext_prot *)
1233 (&user_sigframe->uc))->uc_mcontext.cr0_hi);
1234 #endif
1235 } else {
1236 u_cr0_hi = &(user_sigframe->uc.uc_mcontext.cr0_hi);
1239 __get_user(AW(cr0_hi), u_cr0_hi);
1241 DebugSRT("update user process return IP u_cr0_hi =%p from 0x%lx to 0x%lx\n",
1242 u_cr0_hi, AS(regs->crs.cr0_hi).ip << 3,
1243 AS(cr0_hi).ip << 3);
1245 if (AS(regs->crs.cr0_hi).ip != AS(cr0_hi).ip &&
1246 (AS(cr0_hi).ip << 3) < TASK_SIZE) {
			/*
			 * The following situation is possible:
			 *  - the user's signal handler changes IP;
			 *  - the kernel ignores the trap cellar in this case
			 *    and starts to deliver the next signal;
			 *  - that signal's handler does not change IP;
			 *  - the kernel starts to handle the trap cellar again.
			 * The kernel must never handle the trap cellar after a
			 * user signal handler has changed IP, so give up the
			 * trap cellar here.
			 */
1258 if (trap) {
1259 trap->tc_count = 0;
1260 DebugSRT("curr_cnt:%d tc_cnt:%d\n",
1261 trap->curr_cnt, trap->tc_count / 3);
1264 #ifdef CONFIG_SECONDARY_SPACE_SUPPORT
1265 regs->rp_ret = 0;
1266 #endif
1268 AS(regs->crs.cr0_hi).ip = AS(cr0_hi).ip;
1272 if (trap && (3 * trap->curr_cnt) < trap->tc_count &&
1273 trap->tc_count > 0) {
1274 DebugSRT("continue intrpt addr %lx cnt %d tc_count %d\n",
1275 trap->tcellar[trap->curr_cnt].address,
1276 trap->curr_cnt, trap->tc_count);
1277 trap->from_sigreturn = 1;
1278 do_trap_cellar(regs, 0);
1281 DebugSRT("return 0\n");
1282 return 0;
1286 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1288 * jump_graph_ftrace - mark skipped entries as finished in longjmp().
1289 * @limit - trim all frames after this.
1291 static notrace void jump_graph_ftrace(unsigned long limit)
1293 e2k_mem_crs_t *frame;
1294 unsigned long fp;
1295 unsigned long flags;
1296 unsigned long original_return_point;
1297 int index;
1299 if (current->curr_ret_stack < 0 || !current->ret_stack)
1300 return;
1302 /* We are removing psl_down windows from the top of the
1303 * stack. Corresponding entries from current->ret_stack
1304 * must be deleted (otherwise they will confuse ftrace
1305 * itself, copy_thread() and do_execve()). */
1306 # if DEBUG_FTRACE_MODE
1307 pr_info("%d: fixing ftrace stack\n", current->pid);
1308 for (index = 0; index <= current->curr_ret_stack; index++)
1309 pr_info("%d:\tentry at 0x%lx has %pS return value\n",
1310 current->pid, current->ret_stack[index].fp,
1311 current->ret_stack[index].ret);
1312 # endif
1315 * Remove all entries whose windows will be trimmed.
1316 * We are currently here:
1318 * ttable_entry -> ... -> jump_graph_ftrace
1320 * Previous functions in chain stack could be replaced with
1321 * return_to_handler_XX(). But consider the following situation:
1323 * <user functions 1>
1324 * <kernel functions 1>
1325 * <user functions 2>
1326 * <kernel functions 2> <=== WE ARE HERE
1328 * If we jump to <user functions 1> then we have to skip ftrace's
1329 * ret_stack entries from <kernel functions 1>, but we must keep
1330 * ttable_entry() which is in <kernel functions 2>. This is kinda
1331 * awkward, and we have to mark all of those functions as finished
1332 * right now.
1335 index = current->curr_ret_stack;
1336 fp = current->ret_stack[index].fp;
1338 /* Remove all trimmed entries from current->ret_stack,
1339 * restoring pointers along the way. */
1340 while (index >= 0 && fp > limit) {
1341 DebugFTRACE("%d:\tremoving entry at 0x%lx (%pS)\n",
1342 current->pid, current->ret_stack[index].fp,
1343 current->ret_stack[index].ret);
1345 raw_all_irq_save(flags);
1346 original_return_point = ftrace_return_to_handler(fp);
1348 E2K_FLUSHC;
1349 E2K_FLUSH_WAIT;
1351 frame = (e2k_mem_crs_t *) fp;
1352 AW(frame->cr0_hi) = original_return_point;
1353 raw_all_irq_restore(flags);
1355 --index;
1356 if (index < 0)
1357 break;
1358 fp = current->ret_stack[index].fp;
1361 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1363 static void check_longjmp_permissions(u64 old_ip, u64 new_ip)
1365 struct mm_struct *mm = current->mm;
1366 struct vm_area_struct *old_vma, *new_vma;
1367 int ret = 0;
1369 down_read(&mm->mmap_sem);
1371 old_vma = find_vma(mm, old_ip);
1372 if (!old_vma || old_ip < old_vma->vm_start) {
1373 ret = -ESRCH;
1374 goto out_unlock;
1377 new_vma = find_vma(mm, new_ip);
1378 if (!new_vma || new_ip < new_vma->vm_start) {
1379 ret = -ESRCH;
1380 goto out_unlock;
1383 if ((old_vma->vm_flags & (VM_READ|VM_WRITE|VM_EXEC)) ^
1384 (new_vma->vm_flags & (VM_READ|VM_WRITE|VM_EXEC))) {
1385 ret = -EPERM;
1386 goto out_unlock;
1389 out_unlock:
1390 up_read(&mm->mmap_sem);
1392 if (ret) {
1393 SIGDEBUG_PRINT("SIGKILL. longjmp(): old and new IPs have different permissions\n");
1394 force_sig(SIGKILL, current);
1400 #if _NSIG != 64
1401 # error Fix sigmask restoring in longjmp/setcontext
1402 #endif
1403 noinline
1404 long do_longjmp(u64 retval, u64 jmp_sigmask, e2k_cr0_hi_t jmp_cr0_hi,
1405 e2k_cr1_lo_t jmp_cr1_lo, e2k_pcsp_lo_t jmp_pcsp_lo,
1406 e2k_pcsp_hi_t jmp_pcsp_hi, u32 jmp_br,
1407 u32 fpcr, u32 fpsr, u32 pfpfr, bool restore_fpu)
1409 thread_info_t *thread_info = current_thread_info();
1410 long rval = 0;
1411 pt_regs_t *cur_regs;
1412 pt_regs_t *regs;
1413 struct pt_regs new_regs;
1414 e2k_psp_hi_t psp_hi;
1415 e2k_pcsp_lo_t pcsp_lo;
1416 e2k_pcsp_hi_t pcsp_hi;
1417 s64 fp_ind;
1418 s64 cr_ind;
1419 e2k_addr_t pcs_base_candidate;
1420 e2k_addr_t ps_base_candidate;
1421 e2k_addr_t pcs_window_base;
1422 e2k_addr_t new_pcs_window_base;
1423 e2k_size_t pcs_window_size;
1424 e2k_size_t new_pcs_window_size;
1425 e2k_size_t pcs_window_ind;
1426 e2k_size_t new_pcs_window_ind;
1427 e2k_addr_t new_sbr;
1428 e2k_addr_t ps_base;
1429 e2k_addr_t ps_window_offset;
1430 e2k_addr_t pcs_ind;
1431 e2k_addr_t pcs_base;
1432 e2k_addr_t pcs_window_offset;
1433 e2k_addr_t ps_ind;
1434 int psl_down;
1435 int ppsl_shift = 0;
1436 e2k_size_t wd_psize;
1437 int sw_num;
1438 int sw;
1439 u64 ussz;
1440 e2k_mem_crs_t *crs;
1441 struct hw_stack_area *new_u_pcs;
1442 int new_u_pcs_found = 0;
1444 cur_regs = thread_info->pt_regs;
1446 copy_jmp_regs(cur_regs, &new_regs);
1448 DebugSLJ("current regs 0x%p\n", cur_regs);
1450 DebugSLJ("system call from IP in CR0 0x%lx\n",
1451 AS_STRUCT(cur_regs->crs.cr0_hi).ip << 3);
1453 DebugSLJ("jump point sigmask 0x%lx ip 0x%lx cr1_lo 0x%lx : wbs 0x%x wpsz 0x%x wfx %d\n",
1454 jmp_sigmask, AW(jmp_cr0_hi), AW(jmp_cr1_lo),
1455 AS_STRUCT(jmp_cr1_lo).wbs,
1456 AS_STRUCT(jmp_cr1_lo).wpsz,
1457 AS_STRUCT(jmp_cr1_lo).wfx);
1458 DebugSLJ("jump point PCSP : base 0x%llx, ind 0x%x, size 0x%x\n",
1459 jmp_pcsp_lo.PCSP_lo_base, jmp_pcsp_hi.PCSP_hi_ind,
1460 jmp_pcsp_hi.PCSP_hi_size);
1462 check_longjmp_permissions(AS(cur_regs->crs.cr0_hi).ip << 3,
1463 AS(jmp_cr0_hi).ip << 3);
1465 psp_hi = cur_regs->stacks.psp_hi;
1466 ps_base = (e2k_addr_t) GET_PS_BASE(thread_info);
1467 ps_window_offset = cur_regs->stacks.psp_lo.PSP_lo_base - ps_base;
1468 ps_ind = ps_window_offset + psp_hi.PSP_hi_ind;
1470 pcsp_lo = cur_regs->stacks.pcsp_lo;
1471 pcsp_hi = cur_regs->stacks.pcsp_hi;
1472 pcs_base = (e2k_addr_t) GET_PCS_BASE(thread_info);
1473 pcs_window_base = pcsp_lo.PCSP_lo_base;
1474 pcs_window_size = pcsp_hi.PCSP_hi_size;
1475 pcs_window_ind = pcsp_hi.PCSP_hi_ind;
1476 pcs_window_offset = pcs_window_base - pcs_base;
1477 pcs_ind = pcs_window_offset + pcs_window_ind;
1479 pcsp_lo.PCSP_lo_base = pcs_base;
1480 pcsp_hi.PCSP_hi_ind = pcs_ind;
1481 psp_hi.PSP_hi_ind = ps_ind;
1483 new_pcs_window_base = jmp_pcsp_lo.PCSP_lo_base;
1484 new_pcs_window_size = jmp_pcsp_hi.PCSP_hi_size;
1485 new_pcs_window_ind = jmp_pcsp_hi.PCSP_hi_ind;
	/*
	 * In the case of pseudo-discontinuous user hardware stacks, find the
	 * user hardware stack area the longjmp targets and correct
	 * new_pcs_window_base accordingly.
	 */
1492 if (UHWS_PSEUDO_MODE) {
1493 new_u_pcs = thread_info->cur_pcs;
1494 if (new_pcs_window_base < (e2k_addr_t)new_u_pcs->base ||
1495 new_pcs_window_base >=
1496 (e2k_addr_t)new_u_pcs->base +
1497 new_u_pcs->size) {
1498 list_for_each_entry(new_u_pcs,
1499 &thread_info->old_u_pcs_list,
1500 list_entry) {
1501 if (new_pcs_window_base >=
1502 (e2k_addr_t)new_u_pcs->base &&
1503 new_pcs_window_base <
1504 (e2k_addr_t)new_u_pcs->base +
1505 new_u_pcs->size) {
1506 new_u_pcs_found = 1;
1507 break;
1510 if (!new_u_pcs_found) {
1511 SIGDEBUG_PRINT("SIGKILL. do_longjmp(): couldn't find new_u_pcs\n");
1512 force_sig(SIGKILL, current);
1513 return 0;
1515 new_pcs_window_base +=
1516 (e2k_addr_t)thread_info->cur_pcs->base -
1517 (e2k_addr_t)new_u_pcs->base;
1521 psl_down = ((pcs_window_base + pcs_window_ind) -
1522 (new_pcs_window_base + new_pcs_window_ind)) / SZ_OF_CR;
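
	/*
	 * Worked example (a sketch; assumes the usual 32-byte chain-stack
	 * frame, i.e. SZ_OF_CR == sizeof(e2k_mem_crs_t)): if the current
	 * chain-stack top is at base + 0x300 and the saved jump point is at
	 * base + 0x200, then psl_down = 0x100 / SZ_OF_CR = 8 frames must be
	 * unwound.
	 */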
1524 if (current->thread.flags & E2K_FLAG_PROTECTED_MODE) {
1525 unsigned long flags;
1527 raw_all_irq_save(flags);
1528 E2K_FLUSHC; /* Chain stack only flushing is enough here.*/
1529 E2K_FLUSH_WAIT;
1530 crs = (e2k_mem_crs_t *) (pcs_window_base + pcs_window_ind);
1531 while (crs > (e2k_mem_crs_t *) (new_pcs_window_base +
1532 new_pcs_window_ind)) {
1533 if ((e2k_addr_t) crs < pcs_window_base) {
1534 raw_all_irq_restore(flags);
1535 SIGDEBUG_PRINT("SIGKILL. do_longjmp(): invalid parameters pcs_window_base:0x%lx pcs_window_ind:0x%lx new_pcs_window_base:0x%lx new_pcs_window_ind:0x%lx\n",
1536 pcs_window_base, pcs_window_ind,
1537 new_pcs_window_base,
1538 new_pcs_window_ind);
1539 force_sig(SIGKILL, current);
1540 return 0;
1542 if ((AS_STRUCT(crs->cr0_hi).ip << 3) < TASK_SIZE) {
1543 ppsl_shift++;
1545 crs--;
1547 raw_all_irq_restore(flags);
1550 DebugSLJ("current USD = 0x%lx : 0x%lx; psl_down = %d, ppsl_shift = %d\n ",
1551 AS_WORD(cur_regs->stacks.usd_hi),
1552 AS_WORD(cur_regs->stacks.usd_lo), psl_down, ppsl_shift);
1554 DebugSLJ("current psp= 0x%lx : 0x%lx to be constricted\n",
1555 AS_WORD(cur_regs->stacks.psp_hi),
1556 AS_WORD(cur_regs->stacks.psp_lo));
1558 if (ps_window_offset) {
1559 DebugSLJ("procedure stack absolute base addr 0x%lx, offset 0x%lx absolute ind 0x%lx\n",
1560 ps_base, ps_window_offset, ps_ind);
1563 DebugSLJ("current chain stack base 0x%lx ind 0x%lx size 0x%lx will be constricted for %d level(s)\n",
1564 pcs_window_base, pcs_window_ind, pcs_window_size, psl_down);
1566 if (pcs_window_offset) {
1567 DebugSLJ("procedure chain stack absolute base addr 0x%lx, offset 0x%lx absolute ind 0x%lx\n",
1568 pcs_base, pcs_window_offset, pcs_ind);
1571 go_hd_stk_down(psp_hi, pcsp_lo, pcsp_hi,
1572 psl_down, (e2k_addr_t *) &fp_ind, (e2k_addr_t *) &cr_ind,
1573 &wd_psize, &sw_num, &new_regs.crs, 1 /* user stacks */);
1575 DebugSLJ("jump point procedure stack ind 0x%lx, chain stack ind 0x%lx, WD_psize 0x%lx SW_num %d\n",
1576 fp_ind, cr_ind, wd_psize, sw_num);
1578 if (psl_down != 0) {
1579 e2k_size_t cur_wbs; /* in quad registers */
1580 e2k_size_t jmp_wbs;
1581 e2k_size_t wd_size;
1582 e2k_size_t delta;
1583 e2k_size_t jmp_psize;
1585 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1586 jump_graph_ftrace(new_pcs_window_ind -
1587 (pcs_window_base - new_pcs_window_base));
1588 #endif
		/*
		 * The procedure stack frame size of the function we jump to
		 * must be corrected to what it was when the jump point was
		 * set.  We can get here after a trap (wd_psize == 0), or the
		 * jumping function may have called some other function with
		 * a different number of parameters.  If the parameter count
		 * changed, fp_ind must be corrected for the procedure stack.
		 */
1598 cur_wbs = AS_STRUCT(new_regs.crs.cr1_lo).wbs;
1599 jmp_wbs = AS_STRUCT(jmp_cr1_lo).wbs;
1600 wd_size = cur_wbs + wd_psize;
1601 DebugSLJ("current CR1.wbs 0x%lx, jump point CR1.wbs 0x%lx WD.size should be 0x%lx\n",
1602 cur_wbs, jmp_wbs, wd_size);
1603 if (wd_size <= jmp_wbs) {
1604 SIGDEBUG_PRINT("SIGKILL. sys_e2k_longjmp2(): calculated jump point WD_size 0x%lx (CR1.wbs 0x%lx + WD.psize 0x%lx) <= 0x%lx received from jump info wbs\n",
1605 wd_size, cur_wbs, wd_psize, jmp_wbs);
1606 force_sig(SIGKILL, current);
1607 return 0;
1609 delta = cur_wbs - jmp_wbs;
1610 if (delta != 0) {
1611 fp_ind -= (delta * EXT_4_NR_SZ);
1612 DebugSLJ("corrected jump point procedure stack ind 0x%lx\n",
1613 fp_ind);
1615 jmp_psize = (wd_size - jmp_wbs) * E2K_NR_SIZE;
1616 delta = AS_STRUCT(new_regs.wd).psize - jmp_psize;
1617 DebugSLJ("current WD_psize 0x%x, jump point WD.psize 0x%lx\n",
1618 AS_STRUCT(new_regs.wd).psize, jmp_psize);
1619 if (delta != 0) {
1620 s64 new_wd_psize = AS_STRUCT(new_regs.wd).psize -
1621 delta;
1622 if (new_wd_psize <= 0) {
1623 SIGDEBUG_PRINT("SIGKILL. sys_e2k_longjmp2(): calculated jump point WD.psize 0x%lx <= 0 (was WD.psize 0x%x, should be 0x%lx)\n",
1624 new_wd_psize,
1625 AS_STRUCT(new_regs.wd).psize,
1626 jmp_psize);
1627 force_sig(SIGKILL, current);
1628 return 0;
1630 AS_STRUCT(new_regs.wd).psize = new_wd_psize;
1631 DebugSLJ("corrected jump point WD_psize 0x%x\n",
1632 AS_STRUCT(new_regs.wd).psize);
1635 if (fp_ind < 0) {
1636 SIGDEBUG_PRINT("SIGKILL. sys_e2k_longjmp2(): jump point procedure stack index (-0x%llx) is out of user PS\n",
1637 -fp_ind);
1638 force_sig(SIGKILL, current);
1639 return 0;
1640 } else if (fp_ind < ps_window_offset) {
1641 /* Went below */
1642 DebugSLJ("jump point procedure stack ind (fp) is below of current active frame\n");
1643 ps_base_candidate = ps_base + fp_ind;
1645 new_regs.stacks.psp_lo.PSP_lo_base =
1646 ps_base_candidate & PAGE_MASK;
1647 new_regs.stacks.psp_hi.PSP_hi_ind =
1648 ps_base_candidate & (~PAGE_MASK);
1649 DebugSLJ("jump point procedure stack new active frame base 0x%llx, ind 0x%x, size 0x%x\n",
1650 new_regs.stacks.psp_lo.PSP_lo_base,
1651 new_regs.stacks.psp_hi.PSP_hi_ind,
1652 new_regs.stacks.psp_hi.PSP_hi_size);
1653 } else if (fp_ind <= ps_window_offset +
1654 cur_regs->stacks.psp_hi.PSP_hi_ind) {
1656 /* Same frame */
1657 new_regs.stacks.psp_hi.PSP_hi_ind = fp_ind - ps_window_offset;
1658 DebugSLJ("jump point procedure stack is in the current active frame: base 0x%llx, ind 0x%x, size 0x%x\n",
1659 new_regs.stacks.psp_lo.PSP_lo_base,
1660 new_regs.stacks.psp_hi.PSP_hi_ind,
1661 new_regs.stacks.psp_hi.PSP_hi_size);
1662 } else {
1663 SIGDEBUG_PRINT("SIGKILL. sys_e2k_longjmp2(): jump point procedure stack index (0x%llx) can not be above of current active PS (0x%llx)\n",
1664 fp_ind, ps_window_offset +
1665 cur_regs->stacks.psp_hi.PSP_hi_ind);
1666 force_sig(SIGKILL, current);
1667 return 0;
1670 if (cr_ind < 0) {
1671 SIGDEBUG_PRINT("SIGKILL. sys_e2k_longjmp2(): jump point procedure chain stack index (-0x%llx) is out of user PCS\n",
1672 -cr_ind);
1673 force_sig(SIGKILL, current);
1674 return 0;
1675 } else if (cr_ind < pcs_window_offset) {
1677 /* Went below */
1678 DebugSLJ("jump point procedure stack ind (fp) is below of current active frame\n");
1679 pcs_base_candidate = pcs_base + cr_ind;
1680 new_regs.stacks.pcsp_lo.PCSP_lo_base =
1681 pcs_base_candidate & PAGE_MASK;
1682 new_regs.stacks.pcsp_hi.PCSP_hi_ind =
1683 pcs_base_candidate & (~PAGE_MASK);
1684 DebugSLJ("jump point procedure chain stack new active frame base 0x%llx, ind 0x%x, size 0x%x\n",
1685 new_regs.stacks.pcsp_lo.PCSP_lo_base,
1686 new_regs.stacks.pcsp_hi.PCSP_hi_ind,
1687 new_regs.stacks.pcsp_hi.PCSP_hi_size);
1688 } else if (cr_ind <= pcs_window_offset + pcs_window_ind) {
1690 /* Same frame */
1691 new_regs.stacks.pcsp_hi.PCSP_hi_ind = cr_ind -
1692 pcs_window_offset;
1693 DebugSLJ("jump point procedure chain stack is in the current active frame: base 0x%llx, ind 0x%x, size 0x%x\n",
1694 new_regs.stacks.pcsp_lo.PCSP_lo_base,
1695 new_regs.stacks.pcsp_hi.PCSP_hi_ind,
1696 new_regs.stacks.pcsp_hi.PCSP_hi_size);
1697 } else {
1698 SIGDEBUG_PRINT("SIGKILL. sys_e2k_longjmp2(): jump point procedure chain stack index (0x%llx) can not be above of current active PCS (0x%llx)\n",
1699 cr_ind, pcs_window_offset + pcs_window_ind);
1700 force_sig(SIGKILL, current);
1701 return 0;
	new_regs.sys_rval = (int)retval;
	if (retval == 0)
		new_regs.sys_rval = 1;
1708 ussz = AS_STRUCT(new_regs.crs.cr1_hi).ussz << 4;
1709 DebugSLJ("jump point data stack size 0x%lx\n",
1710 ussz);
1712 new_regs.crs.cr1_lo = jmp_cr1_lo;
1713 DebugSLJ("jump point in mem CR1: wbs 0x%x, wpsz 0x%x, wfx %d\n",
1714 AS_STRUCT(new_regs.crs.cr1_lo).wbs,
1715 AS_STRUCT(new_regs.crs.cr1_lo).wpsz,
1716 AS_STRUCT(new_regs.crs.cr1_lo).wfx);
1717 DebugSLJ("jump point IP in mem CR0 0x%lx new IP 0x%lx\n",
1718 AS(new_regs.crs.cr0_hi).ip << 3, AS(jmp_cr0_hi).ip << 3);
1720 DebugSLJ("jump point BR in mem CR1 0x%x new BR 0x%x\n",
1721 AS_STRUCT(new_regs.crs.cr1_hi).br , jmp_br);
1723 AS_STRUCT(new_regs.crs.cr0_hi).ip = AS(jmp_cr0_hi).ip;
1724 AS_STRUCT(new_regs.crs.cr1_hi).br = jmp_br;
	/* We may be coming from a user signal handler. */

	/*
	 * constrict_hardware_stacks() is needed because the hardware stacks
	 * we are returning to can be smaller than they are now.
	 */
1733 rval = constrict_hardware_stacks(cur_regs, &new_regs);
1734 if (rval != 0) {
1735 SIGDEBUG_PRINT("SIGKILL. sys_e2k_longjmp2(): could not constrict hardware stacks\n");
1736 force_sig(SIGKILL, current);
1737 return 0;
	}

	/*
	 * Find the first pt_regs structure near the long jump point,
	 * dropping all pt_regs structures that are being thrown away.
	 */
1744 local_irq_disable();
1746 regs = cur_regs;
1747 DebugSLJ("pt_regs list head is 0x%p\n",
1748 regs);
1749 for (sw = 0; sw < sw_num; sw ++) {
1750 CHECK_PT_REGS_LOOP(regs);
1751 regs = regs->next;
		if (regs == NULL) {
			panic("do_longjmp(): could not find pt_regs structure #%d in the list of thread regs\n",
				sw);
		}
	}
	if (!user_mode(regs)) {
		panic("do_longjmp(): found pt_regs structure #%d is not a user regs structure\n",
			sw_num);
	}
1761 DebugSLJ("pt_regs to jump is 0x%p\n",
1762 regs);
1763 new_regs.next = regs->next;
1764 CHECK_PT_REGS_LOOP(new_regs.next);
	/*
	 * USD stack restoration if the signal's SA_ONSTACK flag was set.
	 */
1769 #ifdef CONFIG_PROTECTED_MODE
1770 if (current->thread.flags & E2K_FLAG_PROTECTED_MODE) {
		/*
		 * In protected mode the SAVE_USER_USD_REGS macro does
		 * (regs)->stacks.usd_lo.USD_lo_half = pusd_lo.PUSD_lo_half;
		 * only the address in PUSD_lo_base (the low 32 bits) is
		 * needed here.
		 */
1776 new_sbr = (AS_STRUCT(regs->stacks.usd_lo).base & 0xffffffff) -
1777 AS_STRUCT(regs->stacks.usd_hi).size;
1778 } else {
1779 #endif /* CONFIG_PROTECTED_MODE */
1780 new_sbr = AS_STRUCT(regs->stacks.usd_lo).base -
1781 AS_STRUCT(regs->stacks.usd_hi).size;
1782 #ifdef CONFIG_PROTECTED_MODE
1784 #endif /* CONFIG_PROTECTED_MODE */
1786 AS_STRUCT(new_regs.stacks.usd_lo).base = new_sbr + ussz;
1787 AS_STRUCT(new_regs.stacks.usd_hi).size = ussz;
1789 #ifdef CONFIG_PROTECTED_MODE
1790 /* delete global pointers to the local stack */
1791 if (current->thread.flags & E2K_FLAG_PROTECTED_MODE) {
1792 int jmp_psl;
1793 e2k_pusd_lo_t *pusd_lo, *new_pusd_lo;
1795 pusd_lo = (e2k_pusd_lo_t *) &cur_regs->stacks.usd_lo;
1796 new_pusd_lo = (e2k_pusd_lo_t *) &new_regs.stacks.usd_lo;
1798 jmp_psl = (AS(*pusd_lo).psl) - ppsl_shift;
1800 ASP(new_pusd_lo).psl = jmp_psl;
1802 DebugSLJ("new USD = %lx:%lx NEW psl is %d ppsl_shift=%d psl_down=%d\n",
1803 AS_WORD(new_regs.stacks.usd_hi),
1804 AS_WORD(new_regs.stacks.usd_lo),
1805 ASP(pusd_lo).psl, ppsl_shift , psl_down);
1807 if (jmp_psl * SZ_OF_CR > new_pcs_window_ind) {
1808 pr_info(" BAD in longjmp() new_pcs_window_ind:0x %ld jmp_psl=%d(%ld)\n",
1809 new_pcs_window_ind, jmp_psl,
1810 jmp_psl * SZ_OF_CR);
1812 ASP(new_pusd_lo).psl = jmp_psl;
1814 delete_records(jmp_psl);
1816 DebugSLJ("jump point pusd_lo.psl %d\n", ASP(pusd_lo).psl);
1818 #endif
1819 DebugSLJ("jump point psl_down %d\n", psl_down);
1821 DebugSLJ("new USD = %lx:%lx\n",
1822 AS_WORD(new_regs.stacks.usd_hi),
1823 AS_WORD(new_regs.stacks.usd_lo));
1825 copy_jmp_regs(&new_regs, regs);
1826 adjust_intr_counter(regs);
1827 thread_info->pt_regs = regs;
1828 thread_info->u_stk_base = new_sbr;
1829 if (jmp_sigmask & sigmask(SIGKILL)) {
1830 jmp_sigmask &= _BLOCKABLE;
1831 (&current->blocked)->sig[0] = jmp_sigmask;
1832 recalc_sigpending();
1834 local_irq_enable();
1836 if (restore_fpu) {
1837 E2K_SET_SREG_NV(fpcr, fpcr);
1838 E2K_SET_SREG_NV(fpsr, fpsr);
1839 E2K_SET_SREG_NV(pfpfr, pfpfr);
1842 return retval;
1845 long sys_e2k_longjmp2(struct jmp_info *env, u64 retval)
1847 struct jmp_info jmp_info;
1848 int rval;
1850 DebugSLJ("pid %d start env %p retval %ld\n",
1851 current->pid, env, retval);
1853 rval = copy_jmpinfo_from_user(env, &jmp_info);
1854 if (rval) {
1855 SIGDEBUG_PRINT("SIGKILL. sys_e2k_longjmp2(): could not copy jump info from user env 0x%p\n",
1856 env);
1857 force_sig(SIGKILL, current);
1858 return rval;
1861 return do_longjmp(retval, jmp_info.sigmask,
1862 (e2k_cr0_hi_t) jmp_info.ip,
1863 (e2k_cr1_lo_t) jmp_info.cr1lo,
1864 (e2k_pcsp_lo_t) jmp_info.pcsplo, (e2k_pcsp_hi_t)
1865 (jmp_info.pcsphi + PCSHTP_SIGN_EXTEND(jmp_info.pcshtp)),
1866 jmp_info.br, 0, 0, 0, 0);
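
/*
 * Illustrative user-space view (a sketch, not part of this file): the libc
 * setjmp()/longjmp() pair on e2k is assumed to end up here, with setjmp()
 * saving the current CR0/CR1/PCSP state into struct jmp_info and longjmp()
 * issuing the e2k_longjmp2 system call:
 *
 *	jmp_buf env;
 *
 *	if (setjmp(env) == 0) {
 *		do_work();		// may call longjmp(env, 42) deep inside
 *	} else {
 *		// resumed here with the non-zero value passed to longjmp();
 *		// do_longjmp() forces retval 0 to 1, as the C standard
 *		// requires of longjmp(env, 0)
 *	}
 */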
1869 long sys_setcontext(const struct ucontext __user *ucp, int sigsetsize)
1871 int rval;
1872 e2k_cr0_hi_t cr0_hi;
1873 e2k_cr1_lo_t cr1_lo;
1874 e2k_cr1_hi_t cr1_hi;
1875 e2k_pcsp_lo_t pcsp_lo;
1876 e2k_pcsp_hi_t pcsp_hi;
1877 u64 sigmask, prev_key, next_key;
1878 u32 fpcr, fpsr, pfpfr;
1880 if (sigsetsize != sizeof(sigset_t))
1881 return -EINVAL;
1883 if (!access_ok(ACCESS_WRITE, ucp, sizeof(struct ucontext)))
1884 return -EFAULT;
1886 rval = __get_user(next_key, &ucp->uc_mcontext.sbr);
1887 if (rval)
1888 return -EFAULT;
1890 prev_key = context_ti_key(current_thread_info());
1892 DebugCTX("ucp=%lx current key=0x%lx next key=0x%lx\n",
1893 ucp, prev_key, next_key);
1894 if (!context_keys_equal(prev_key, next_key))
1895 return do_swapcontext(NULL, ucp, false, CTX_64_BIT);
1897 rval = __copy_from_user(&sigmask, &ucp->uc_sigmask,
1898 sizeof(ucp->uc_sigmask));
1899 rval |= __get_user(AW(cr0_hi), &ucp->uc_mcontext.cr0_hi);
1900 rval |= __get_user(AW(cr1_lo), &ucp->uc_mcontext.cr1_lo);
1901 rval |= __get_user(AW(cr1_hi), &ucp->uc_mcontext.cr1_hi);
1902 rval |= __get_user(AW(pcsp_lo), &ucp->uc_mcontext.pcsp_lo);
1903 rval |= __get_user(AW(pcsp_hi), &ucp->uc_mcontext.pcsp_hi);
1904 rval |= __get_user(fpcr, &ucp->uc_extra.fpcr);
1905 rval |= __get_user(fpsr, &ucp->uc_extra.fpsr);
1906 rval |= __get_user(pfpfr, &ucp->uc_extra.pfpfr);
1907 if (rval)
1908 return -EFAULT;
1910 /* A hack to make do_longjmp() restore blocked signals mask */
1911 sigmask |= sigmask(SIGKILL);
1913 DebugCTX("calling longjmp\n");
1914 do_longjmp(0, sigmask, cr0_hi, cr1_lo, pcsp_lo, pcsp_hi,
1915 AS(cr1_hi).br, fpcr, fpsr, pfpfr, 1);
1917 return 0;
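
/*
 * Illustrative user-space view (hedged sketch): sys_setcontext() is assumed
 * to back the libc setcontext()/swapcontext() calls.  The "same context key"
 * fast path above reuses do_longjmp(); a context created for a different
 * stack goes through do_swapcontext().  Typical usage:
 *
 *	ucontext_t uc_main, uc_co;
 *	char stack[64 * 1024];
 *
 *	getcontext(&uc_co);
 *	uc_co.uc_stack.ss_sp = stack;
 *	uc_co.uc_stack.ss_size = sizeof(stack);
 *	uc_co.uc_link = &uc_main;
 *	makecontext(&uc_co, coroutine, 0);
 *	swapcontext(&uc_main, &uc_co);	// eventually reaches this syscall
 */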
1920 #ifdef CONFIG_COMPAT
1921 long compat_sys_setcontext(const struct ucontext_32 __user *ucp,
1922 int sigsetsize)
1924 int rval;
1925 e2k_cr0_hi_t cr0_hi;
1926 e2k_cr1_lo_t cr1_lo;
1927 e2k_cr1_hi_t cr1_hi;
1928 e2k_pcsp_lo_t pcsp_lo;
1929 e2k_pcsp_hi_t pcsp_hi;
1930 u64 sigmask, prev_key, next_key;
1931 u32 fpcr, fpsr, pfpfr;
1933 if (sigsetsize != sizeof(sigset_t))
1934 return -EINVAL;
1936 if (!access_ok(ACCESS_WRITE, ucp, sizeof(struct ucontext)))
1937 return -EFAULT;
1939 rval = __get_user(next_key, &ucp->uc_mcontext.sbr);
1940 if (rval)
1941 return -EFAULT;
1943 prev_key = context_ti_key(current_thread_info());
1945 DebugCTX("ucp=%lx current key=0x%lx next key=0x%lx\n",
1946 ucp, prev_key, next_key);
1947 if (!context_keys_equal(prev_key, next_key))
1948 return do_swapcontext(NULL, ucp, false, CTX_32_BIT);
1950 rval = __copy_from_user(&sigmask, &ucp->uc_sigmask,
1951 sizeof(ucp->uc_sigmask));
1952 rval |= __get_user(AW(cr0_hi), &ucp->uc_mcontext.cr0_hi);
1953 rval |= __get_user(AW(cr1_lo), &ucp->uc_mcontext.cr1_lo);
1954 rval |= __get_user(AW(cr1_hi), &ucp->uc_mcontext.cr1_hi);
1955 rval |= __get_user(AW(pcsp_lo), &ucp->uc_mcontext.pcsp_lo);
1956 rval |= __get_user(AW(pcsp_hi), &ucp->uc_mcontext.pcsp_hi);
1957 rval |= __get_user(fpcr, &ucp->uc_extra.fpcr);
1958 rval |= __get_user(fpsr, &ucp->uc_extra.fpsr);
1959 rval |= __get_user(pfpfr, &ucp->uc_extra.pfpfr);
1960 if (rval)
1961 return -EFAULT;
1963 /* A hack to make do_longjmp() restore blocked signals mask */
1964 sigmask |= sigmask(SIGKILL);
1966 DebugCTX("calling longjmp\n");
1967 do_longjmp(0, sigmask, cr0_hi, cr1_lo, pcsp_lo, pcsp_hi,
1968 AS(cr1_hi).br, fpcr, fpsr, pfpfr, 1);
1970 return 0;
1972 #endif
1974 #ifdef CONFIG_PROTECTED_MODE
1975 long protected_sys_setcontext(const struct ucontext_prot __user *ucp,
1976 int sigsetsize)
1978 int rval;
1979 e2k_cr0_hi_t cr0_hi;
1980 e2k_cr1_lo_t cr1_lo;
1981 e2k_cr1_hi_t cr1_hi;
1982 e2k_pcsp_lo_t pcsp_lo;
1983 e2k_pcsp_hi_t pcsp_hi;
1984 u64 sigmask, prev_key, next_key;
1985 u32 fpcr, fpsr, pfpfr;
1987 if (sigsetsize != sizeof(sigset_t))
1988 return -EINVAL;
1990 if (!access_ok(ACCESS_WRITE, ucp, sizeof(struct ucontext)))
1991 return -EFAULT;
1993 rval = __get_user(next_key, &ucp->uc_mcontext.sbr);
1994 if (rval)
1995 return -EFAULT;
1997 prev_key = context_ti_key(current_thread_info());
1999 DebugCTX("ucp=%lx current key=0x%lx next key=0x%lx\n",
2000 ucp, prev_key, next_key);
2001 if (!context_keys_equal(prev_key, next_key))
2002 return do_swapcontext(NULL, ucp, false, CTX_128_BIT);
2004 rval = __copy_from_user(&sigmask, &ucp->uc_sigmask,
2005 sizeof(ucp->uc_sigmask));
2006 rval |= __get_user(AW(cr0_hi), &ucp->uc_mcontext.cr0_hi);
2007 rval |= __get_user(AW(cr1_lo), &ucp->uc_mcontext.cr1_lo);
2008 rval |= __get_user(AW(cr1_hi), &ucp->uc_mcontext.cr1_hi);
2009 rval |= __get_user(AW(pcsp_lo), &ucp->uc_mcontext.pcsp_lo);
2010 rval |= __get_user(AW(pcsp_hi), &ucp->uc_mcontext.pcsp_hi);
2011 rval |= __get_user(fpcr, &ucp->uc_extra.fpcr);
2012 rval |= __get_user(fpsr, &ucp->uc_extra.fpsr);
2013 rval |= __get_user(pfpfr, &ucp->uc_extra.pfpfr);
2014 if (rval)
2015 return -EFAULT;
2017 /* A hack to make do_longjmp() restore blocked signals mask */
2018 sigmask |= sigmask(SIGKILL);
2020 DebugCTX("calling longjmp\n");
2021 do_longjmp(0, sigmask, cr0_hi, cr1_lo, pcsp_lo, pcsp_hi,
2022 AS(cr1_hi).br, fpcr, fpsr, pfpfr, 1);
2024 return 0;
2026 #endif