6 * Copyright (C) 1998-2000 Hewlett-Packard Co
7 * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
8 * Copyright (C) 1999 VA Linux Systems
9 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
10 * Copyright (C) 1999 Asit Mallick <Asit.K.Mallick@intel.com>
11 * Copyright (C) 1999 Don Dugger <Don.Dugger@intel.com>
14 * Global (preserved) predicate usage on syscall entry/exit path:
19 * p2: (Alias of pKern!) True if any signals are pending.
22 #include <linux/config.h>
24 #include <asm/cache.h>
25 #include <asm/errno.h>
26 #include <asm/offsets.h>
27 #include <asm/processor.h>
28 #include <asm/unistd.h>
29 #include <asm/asmmacro.h>
// Body of the execve wrapper (GLOBAL_ENTRY/END lines not visible in this
// excerpt; intermediate source lines are missing).  Calls sys_execve and,
// on success, clears ar.pfs so a null register window frame is set up.
39 * execve() is special because in case of success, we need to
40 * setup a null register window frame.
43 UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(3))
44 alloc loc1=ar.pfs,3,2,4,0 // 3 inputs, 2 locals, 4 outputs for sys_execve
47 mov out0=in0 // filename
48 ;; // stop bit between alloc and call
51 add out3=16,sp // regs
52 br.call.sptk.few rp=sys_execve
53 .ret0: cmp4.ge p6,p0=r8,r0 // p6 <- (r8 >= 0), i.e. sys_execve succeeded
54 mov ar.pfs=loc1 // restore ar.pfs
56 (p6) mov ar.pfs=r0 // clear ar.pfs in case of success
57 sxt4 r8=r8 // return 64-bit result
// sys_clone2: clone variant taking an explicit stack base/size; funnels
// into do_fork with &pt_regs as out2.  END line not visible in this
// excerpt; intermediate source lines are missing.
63 GLOBAL_ENTRY(sys_clone2)
64 UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2))
65 alloc r16=ar.pfs,3,2,4,0
68 mov loc1=r16 // save ar.pfs across do_fork
72 adds out2=IA64_SWITCH_STACK_SIZE+16,sp // out2 = &regs
73 mov out0=in0 // out0 = clone_flags
74 br.call.sptk.few rp=do_fork
75 .ret1: UNW(.restore sp)
76 adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack
// sys_clone: like sys_clone2 above but with only 2 input registers;
// also funnels into do_fork.  END line not visible in this excerpt;
// intermediate source lines are missing.
82 GLOBAL_ENTRY(sys_clone)
83 UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2))
84 alloc r16=ar.pfs,2,2,4,0
87 mov loc1=r16 // save ar.pfs across do_fork
91 adds out2=IA64_SWITCH_STACK_SIZE+16,sp // out2 = &regs
92 mov out0=in0 // out0 = clone_flags
93 br.call.sptk.few rp=do_fork
94 .ret2: UNW(.restore sp)
95 adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack
// Context switch: saves the old task's kernel sp, installs the new
// task's kernel sp, and updates the "current" task pointer (kept in r13
// and mirrored, as a physical address, in ar.k6).  Returns the previous
// task in r8.  END line not visible in this excerpt.
102 * prev_task <- ia64_switch_to(struct task_struct *next)
104 GLOBAL_ENTRY(ia64_switch_to)
106 alloc r16=ar.pfs,1,0,0,0
110 adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13
111 dep r18=-1,r0,0,61 // build mask 0x1fffffffffffffff
112 adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
114 st8 [r22]=sp // save kernel stack pointer of old task
115 ld8 sp=[r21] // load kernel stack pointer of new task
116 and r20=in0,r18 // physical address of "current"
118 mov ar.k6=r20 // copy "current" into ar.k6
119 mov r8=r13 // return pointer to previously running task
120 mov r13=in0 // set "current" pointer
122 DO_LOAD_SWITCH_STACK( )
// Old-unwinder-only helper: saves a switch_stack that also covers the
// frame active at the call site (ar.pfs handed to save_switch_stack).
// Intermediate source lines are missing in this excerpt.
126 #ifndef CONFIG_IA64_NEW_UNWIND
128 * Like save_switch_stack, but also save the stack frame that is active
129 * at the time this function is called.
131 ENTRY(save_switch_stack_with_current_frame)
133 alloc r16=ar.pfs,0,0,0,0 // pass ar.pfs to save_switch_stack
136 END(save_switch_stack_with_current_frame)
137 #endif /* !CONFIG_IA64_NEW_UNWIND */
// save_switch_stack: spill all preserved state (caller's ar.unat/fpsr,
// f10-f31, r4-r7, b0-b5, ar.pfs/lc/unat/rnat/bspstore, predicates) into
// the switch_stack frame at [sp+16].  Intermediate source lines are
// missing in this excerpt (e.g. the b-register moves into r21-r26).
140 * Note that interrupts are enabled during save_switch_stack and
141 * load_switch_stack. This means that we may get an interrupt with
142 * "sp" pointing to the new kernel stack while ar.bspstore is still
143 * pointing to the old kernel backing store area. Since ar.rsc,
144 * ar.rnat, ar.bsp, and ar.bspstore are all preserved by interrupts,
145 * this is not a problem. Also, we don't need to specify unwind
146 * information for preserved registers that are not modified in
147 * save_switch_stack as the right unwind information is already
148 * specified at the call-site of save_switch_stack.
154 * - b7 holds address to return to
155 * - rp (b0) holds return address to save
157 GLOBAL_ENTRY(save_switch_stack)
160 flushrs // flush dirty regs to backing store (must be first in insn group)
161 mov r17=ar.unat // preserve caller's
162 adds r2=16,sp // r2 = &sw->caller_unat
164 mov r18=ar.fpsr // preserve fpsr
165 mov ar.rsc=r0 // put RSE in mode: enforced lazy, little endian, pl 0
168 adds r3=24,sp // r3 = &sw->ar_fpsr
170 .savesp ar.unat,SW(CALLER_UNAT)
172 .savesp ar.fpsr,SW(AR_FPSR)
// Spill the preserved FP registers f10-f31 in pairs, r2/r3 each
// advancing 32 bytes per store (24 on the final f31 store):
183 stf.spill [r2]=f10,32
184 stf.spill [r3]=f11,32
187 stf.spill [r2]=f12,32
188 stf.spill [r3]=f13,32
191 stf.spill [r2]=f14,32
192 stf.spill [r3]=f15,32
195 stf.spill [r2]=f16,32
196 stf.spill [r3]=f17,32
199 stf.spill [r2]=f18,32
200 stf.spill [r3]=f19,32
203 stf.spill [r2]=f20,32
204 stf.spill [r3]=f21,32
205 mov r17=ar.lc // I-unit
207 stf.spill [r2]=f22,32
208 stf.spill [r3]=f23,32
210 stf.spill [r2]=f24,32
211 stf.spill [r3]=f25,32
213 stf.spill [r2]=f26,32
214 stf.spill [r3]=f27,32
216 stf.spill [r2]=f28,32
217 stf.spill [r3]=f29,32
219 stf.spill [r2]=f30,32
220 stf.spill [r3]=f31,24
// Spill preserved integer registers r4-r7 (NaT bits collect in ar.unat):
222 .mem.offset 0,0; st8.spill [r2]=r4,16
223 .mem.offset 8,0; st8.spill [r3]=r5,16
225 .mem.offset 0,0; st8.spill [r2]=r6,16
226 .mem.offset 8,0; st8.spill [r3]=r7,16
228 st8 [r2]=r21,16 // save b0
229 st8 [r3]=r22,16 // save b1
230 /* since we're done with the spills, read and save ar.unat: */
231 mov r18=ar.unat // M-unit
232 mov r20=ar.bspstore // M-unit
234 st8 [r2]=r23,16 // save b2
235 st8 [r3]=r24,16 // save b3
237 st8 [r2]=r25,16 // save b4
238 st8 [r3]=r26,16 // save b5
240 st8 [r2]=r16,16 // save ar.pfs
241 st8 [r3]=r17,16 // save ar.lc
244 st8 [r2]=r18,16 // save ar.unat
245 st8 [r3]=r19,16 // save ar.rnat
248 st8 [r2]=r20 // save ar.bspstore
249 st8 [r3]=r21 // save predicate registers
250 mov ar.rsc=3 // put RSE back into eager mode, pl 0
252 END(save_switch_stack)
// load_switch_stack: inverse of save_switch_stack — reloads b0-b5,
// ar.pfs/lc/unat/rnat/bspstore, predicates, r4-r7 and the caller's
// unat/fpsr from the switch_stack frame, then pops stale stacked
// registers with loadrs.  Intermediate source lines are missing in this
// excerpt (notably the FP restores between original lines 297 and 350).
256 * - b7 holds address to return to
258 ENTRY(load_switch_stack)
261 invala // invalidate ALAT
263 adds r2=IA64_SWITCH_STACK_B0_OFFSET+16,sp // get pointer to switch_stack.b0
264 mov ar.rsc=r0 // put RSE into enforced lazy mode
265 adds r3=IA64_SWITCH_STACK_B0_OFFSET+24,sp // get pointer to switch_stack.b1
267 ld8 r21=[r2],16 // restore b0
268 ld8 r22=[r3],16 // restore b1
270 ld8 r23=[r2],16 // restore b2
271 ld8 r24=[r3],16 // restore b3
273 ld8 r25=[r2],16 // restore b4
274 ld8 r26=[r3],16 // restore b5
276 ld8 r16=[r2],16 // restore ar.pfs
277 ld8 r17=[r3],16 // restore ar.lc
279 ld8 r18=[r2],16 // restore ar.unat
280 ld8 r19=[r3],16 // restore ar.rnat
283 ld8 r20=[r2] // restore ar.bspstore
284 ld8 r21=[r3] // restore predicate registers
289 loadrs // invalidate stacked regs outside current frame
290 adds r2=16-IA64_SWITCH_STACK_SIZE,r2 // get pointer to switch_stack.caller_unat
291 ;; // stop bit for rnat dependency
293 mov ar.unat=r18 // establish unat holding the NaT bits for r4-r7
294 adds r3=16-IA64_SWITCH_STACK_SIZE,r3 // get pointer to switch_stack.ar_fpsr
296 ld8 r18=[r2],16 // restore caller's unat
297 ld8 r19=[r3],24 // restore fpsr
350 mov ar.unat=r18 // restore caller's unat
351 mov ar.fpsr=r19 // restore fpsr
352 mov ar.rsc=3 // put RSE back into eager mode, pl 0
354 END(load_switch_stack)
// Raw syscall stub: places the syscall number (6th argument) in r15 and
// traps into the kernel via break.  Remainder of the routine (END line
// and error handling) is not visible in this excerpt.
356 GLOBAL_ENTRY(__ia64_syscall)
358 mov r15=in5 // put syscall number in place
359 break __BREAK_SYSCALL
// Trampoline into syscall_trace() that keeps the 8 syscall argument
// registers and b6 (syscall entry point) intact.  The old-unwind path
// additionally saves/drops a switch_stack frame around the call.
// Intermediate source lines are missing in this excerpt.
369 // We invoke syscall_trace through this intermediate function to
370 // ensure that the syscall input arguments are not clobbered. We
371 // also use it to preserve b6, which contains the syscall entry point.
373 GLOBAL_ENTRY(invoke_syscall_trace)
374 #ifdef CONFIG_IA64_NEW_UNWIND
375 UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8))
376 alloc loc1=ar.pfs,8,3,0,0
381 br.call.sptk.few rp=syscall_trace
386 #else /* !CONFIG_IA64_NEW_UNWIND */
387 UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8))
388 alloc loc1=ar.pfs,8,3,0,0
389 ;; // WAW on CFM at the br.call
391 br.call.sptk.many rp=save_switch_stack_with_current_frame // must preserve b6!!
393 br.call.sptk.few rp=syscall_trace
394 .ret5: adds sp=IA64_SWITCH_STACK_SIZE,sp // drop switch_stack frame
400 #endif /* !CONFIG_IA64_NEW_UNWIND */
401 END(invoke_syscall_trace)
// Traced syscall path: notifies the tracer before and after the actual
// syscall (dispatched via b6), stores the return value / error
// indication into pt_regs, then leaves the kernel.  Intermediate source
// lines are missing in this excerpt (e.g. parts of strace_error).
404 // Invoke a system call, but do some tracing before and after the call.
405 // We MUST preserve the current register frame throughout this routine
406 // because some system calls (such as ia64_execve) directly
407 // manipulate ar.pfs.
410 // r15 = syscall number
411 // b6 = syscall entry point
413 .global ia64_strace_leave_kernel
415 GLOBAL_ENTRY(ia64_trace_syscall)
416 PT_REGS_UNWIND_INFO(0)
417 br.call.sptk.few rp=invoke_syscall_trace // give parent a chance to catch syscall args
418 .ret6: br.call.sptk.few rp=b6 // do the syscall
420 cmp.lt p6,p0=r8,r0 // syscall failed?
421 adds r2=IA64_PT_REGS_R8_OFFSET+16,sp // r2 = &pt_regs.r8
422 adds r3=IA64_PT_REGS_R8_OFFSET+32,sp // r3 = &pt_regs.r10
424 (p6) br.cond.sptk.few strace_error // syscall failed ->
425 ;; // avoid RAW on r10
427 .mem.offset 0,0; st8.spill [r2]=r8 // store return value in slot for r8
428 .mem.offset 8,0; st8.spill [r3]=r10 // clear error indication in slot for r10
429 ia64_strace_leave_kernel:
430 br.call.sptk.few rp=invoke_syscall_trace // give parent a chance to catch return value
431 .rety: br.cond.sptk.many ia64_leave_kernel
// strace_error path: convert a failed syscall into errno form, unless
// pt_regs.r8 is non-zero (syscalls that legitimately return negatives):
434 ld8 r3=[r2] // load pt_regs.r8
435 sub r9=0,r8 // negate return value to get errno value
437 cmp.ne p6,p0=r3,r0 // is pt_regs.r8!=0?
438 adds r3=16,r2 // r3=&pt_regs.r10
442 br.cond.sptk.few strace_save_retval
443 END(ia64_trace_syscall)
446 * A couple of convenience macros to help implement/understand the state
447 * restoration that happens at the end of ia64_ret_from_syscall.
// rARBSPSTORE: scratch-register alias (r23) carrying the saved
// ar.bspstore value during the pt_regs restore in ia64_leave_kernel
// (loaded from pt_regs, written back just before the rfi).  The sibling
// rCR*/rAR* defines are not visible in this excerpt.
457 #define rARBSPSTORE r23
// Return path for a freshly forked child: completes scheduling via
// schedule_tail, then checks the task's ptrace flags and diverts to the
// strace path if syscall tracing is active.  Intermediate source lines
// are missing in this excerpt.
461 GLOBAL_ENTRY(ia64_ret_from_clone)
462 PT_REGS_UNWIND_INFO(0)
464 // In SMP mode, we need to call schedule_tail to complete the scheduling process.
465 // Called by ia64_switch_to after do_fork()->copy_thread(). r8 contains the
466 // address of the previously executing task.
467 br.call.sptk.few rp=invoke_schedule_tail
470 adds r2=IA64_TASK_PTRACE_OFFSET,r13
475 tbit.nz p6,p0=r2,PT_TRACESYS_BIT
476 (p6) br strace_check_retval
477 ;; // added stop bits to prevent r8 dependency
478 END(ia64_ret_from_clone)
// Untraced syscall return: on success (r8 >= 0) stores r8 into the
// pt_regs.r8 slot and zeroes the pt_regs.r10 error indication; on
// failure branches to handle_syscall_error.  Falls through toward
// ia64_leave_kernel; intermediate source lines are missing.
480 GLOBAL_ENTRY(ia64_ret_from_syscall)
481 PT_REGS_UNWIND_INFO(0)
482 cmp.ge p6,p7=r8,r0 // syscall executed successfully?
483 adds r2=IA64_PT_REGS_R8_OFFSET+16,sp // r2 = &pt_regs.r8
484 adds r3=IA64_PT_REGS_R8_OFFSET+32,sp // r3 = &pt_regs.r10
487 (p6) st8.spill [r2]=r8 // store return value in slot for r8 and set unat bit
489 (p6) st8.spill [r3]=r0 // clear error indication in slot for r10 and set unat bit
490 (p7) br.cond.spnt.few handle_syscall_error // handle potential syscall failure
491 END(ia64_ret_from_syscall)
// Common kernel exit: run pending softirqs, reschedule / deliver
// signals when returning to user mode (pKern skips those checks),
// restore pt_regs state, switch to register bank 0, unwind the register
// backing store, and rfi back to the interrupted context.  Many
// intermediate source lines are missing in this excerpt.
493 GLOBAL_ENTRY(ia64_leave_kernel)
494 // check & deliver software interrupts:
496 PT_REGS_UNWIND_INFO(0)
498 adds r2=IA64_TASK_PROCESSOR_OFFSET,r13
499 movl r3=irq_stat // softirq_active
503 shl r2=r2,SMP_LOG_CACHE_BYTES // can't use shladd here...
507 movl r3=irq_stat // softirq_active
510 ld8 r2=[r3] // r3 (softirq_active+softirq_mask) is guaranteed to be 8-byte aligned!
517 (p6) br.call.spnt.many rp=invoke_do_softirq
519 (pKern) br.cond.dpnt.many restore_all // yup -> skip check for rescheduling & signal delivery
521 // call schedule() until we find a task that doesn't have need_resched set:
525 adds r2=IA64_TASK_NEED_RESCHED_OFFSET,r13
527 adds r14=IA64_TASK_SIGPENDING_OFFSET,r13
532 mov rp=r3 // arrange for schedule() to return to back_from_resched
535 cmp.ne p2,p0=r14,r0 // NOTE: pKern is an alias for p2!!
537 (p6) br.call.spnt.many b6=invoke_schedule // ignore return value
539 // check & deliver pending signals:
540 (p2) br.call.spnt.few rp=handle_signal_delivery
542 #if defined(CONFIG_ITANIUM_ASTEP_SPECIFIC) || defined(CONFIG_IA64_SOFTSDV_HACKS)
543 // Check for lost ticks
546 movl r14 = 1000 // latency tolerance
553 cmp.ge p6,p7 = r2, r0
554 (p6) br.call.spnt.few rp=invoke_ia64_reset_itm
561 // start restoring the state saved on the kernel stack (struct pt_regs):
563 adds r2=IA64_PT_REGS_R8_OFFSET+16,r12
564 adds r3=IA64_PT_REGS_R8_OFFSET+24,r12
596 ld8 r1=[r2],16 // ar.ccv
597 ld8 r13=[r3],16 // ar.fpsr
599 ld8 r14=[r2],16 // b0
600 ld8 r15=[r3],16+8 // b7
611 // turn off interrupts, interrupt collection, & data translation
612 rsm psr.i | psr.ic | psr.dt
617 invala // invalidate ALAT
618 dep r12=0,r12,61,3 // convert sp to physical address
619 bsw.0;; // switch back to bank 0 (must be last in insn group)
621 #ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC
// Reload control/application register images (rCR*/rAR* aliases) from
// pt_regs via r16/r17:
632 ld8 rCRIPSR=[r16],16 // load cr.ipsr
633 ld8 rCRIIP=[r17],16 // load cr.iip
635 ld8 rCRIFS=[r16],16 // load cr.ifs
636 ld8 rARUNAT=[r17],16 // load ar.unat
638 ld8 rARPFS=[r16],16 // load ar.pfs
639 ld8 rARRSC=[r17],16 // load ar.rsc
641 ld8 rARRNAT=[r16],16 // load ar.rnat (may be garbage)
642 ld8 rARBSPSTORE=[r17],16 // load ar.bspstore (may be garbage)
644 ld8 rARPR=[r16],16 // load predicates
645 ld8 rB6=[r17],16 // load b6
647 ld8 r18=[r16],16 // load ar.rsc value for "loadrs"
648 ld8.fill r1=[r17],16 // load r1
653 ld8.fill r12=[r16],16
654 ld8.fill r13=[r17],16
655 extr.u r19=rCRIPSR,32,2 // extract ps.cpl
657 ld8.fill r14=[r16],16
658 ld8.fill r15=[r17],16
659 cmp.eq p6,p7=r0,r19 // are we returning to kernel mode? (psr.cpl==0)
663 (p6) br.cond.dpnt.few skip_rbs_switch
666 * Restore user backing store.
668 * NOTE: alloc, loadrs, and cover can't be predicated.
670 * XXX This needs some scheduling/tuning once we believe it
671 * really does work as intended.
673 mov r16=ar.bsp // get existing backing store pointer
674 (pNonSys) br.cond.dpnt.few dont_preserve_current_frame
675 cover // add current frame into dirty partition
677 mov rCRIFS=cr.ifs // fetch the cr.ifs value that "cover" produced
678 mov r17=ar.bsp // get new backing store pointer
680 sub r16=r17,r16 // calculate number of bytes that were added to rbs
682 shl r16=r16,16 // shift additional frame size into position for loadrs
684 add r18=r16,r18 // adjust the loadrs value
686 dont_preserve_current_frame:
687 alloc r16=ar.pfs,0,0,0,0 // drop the current call frame (noop for syscalls)
689 mov ar.rsc=r18 // load ar.rsc to be used for "loadrs"
690 #ifdef CONFIG_IA32_SUPPORT
691 tbit.nz p6,p0=rCRIPSR,IA64_PSR_IS_BIT
693 (p6) mov ar.rsc=r0 // returning to IA32 mode
698 mov ar.bspstore=rARBSPSTORE
700 mov ar.rnat=rARRNAT // must happen with RSE in lazy mode
705 mov cr.ifs=rCRIFS // restore cr.ifs only if not a (synchronous) syscall
710 rfi;; // must be last instruction in an insn group
711 END(ia64_leave_kernel)
// Error leg of ia64_ret_from_syscall: negates r8 into an errno value
// and sets pt_regs.r10=-1, unless pt_regs.r8 is non-zero (syscall
// legitimately returned a negative value).  Intermediate source lines
// are missing in this excerpt.
713 ENTRY(handle_syscall_error)
715 * Some system calls (e.g., ptrace, mmap) can return arbitrary
716 * values which could lead us to mistake a negative return
717 * value as a failed syscall. Those syscall must deposit
718 * a non-zero value in pt_regs.r8 to indicate an error.
719 * If pt_regs.r8 is zero, we assume that the call completed
722 PT_REGS_UNWIND_INFO(0)
723 ld8 r3=[r2] // load pt_regs.r8
724 sub r9=0,r8 // negate return value to get errno
726 mov r10=-1 // return -1 in pt_regs.r10 to indicate error
727 cmp.eq p6,p7=r3,r0 // is pt_regs.r8==0?
728 adds r3=16,r2 // r3=&pt_regs.r10
733 .mem.offset 0,0; st8.spill [r2]=r9 // store errno in pt_regs.r8 and set unat bit
734 .mem.offset 8,0; st8.spill [r3]=r10 // store error indication in pt_regs.r10 and set unat bit
735 br.cond.sptk.many ia64_leave_kernel
736 END(handle_syscall_error)
// SMP-only trampoline: calls schedule_tail(prev) with the previous task
// taken from r8, preserving the 8 syscall input registers.  The opening
// #ifdef CONFIG_SMP is not visible in this excerpt.
740 * Invoke schedule_tail(task) while preserving in0-in7, which may be needed
741 * in case a system call gets restarted.
743 ENTRY(invoke_schedule_tail)
744 UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8))
745 alloc loc1=ar.pfs,8,2,1,0
747 mov out0=r8 // Address of previous task
749 br.call.sptk.few rp=schedule_tail
750 .ret11: mov ar.pfs=loc1
753 END(invoke_schedule_tail)
755 #endif /* CONFIG_SMP */
// A-step/SoftSDV-only trampoline: calls ia64_reset_itm (lost-tick
// workaround) while preserving the 8 syscall input registers.
// Intermediate source lines are missing in this excerpt.
757 #if defined(CONFIG_ITANIUM_ASTEP_SPECIFIC) || defined(CONFIG_IA64_SOFTSDV_HACKS)
759 ENTRY(invoke_ia64_reset_itm)
760 UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8))
761 alloc loc1=ar.pfs,8,2,0,0
765 br.call.sptk.many rp=ia64_reset_itm
770 END(invoke_ia64_reset_itm)
772 #endif /* CONFIG_ITANIUM_ASTEP_SPECIFIC || CONFIG_IA64_SOFTSDV_HACKS */
// Trampoline: calls do_softirq() while preserving the 8 syscall input
// registers.  Intermediate source lines are missing in this excerpt.
775 * Invoke do_softirq() while preserving in0-in7, which may be needed
776 * in case a system call gets restarted.
778 ENTRY(invoke_do_softirq)
779 UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8))
780 alloc loc1=ar.pfs,8,2,0,0
784 br.call.sptk.few rp=do_softirq
785 .ret13: mov ar.pfs=loc1
788 END(invoke_do_softirq)
// Trampoline: calls schedule() while preserving the 8 syscall input
// registers.  END line not visible in this excerpt.
791 * Invoke schedule() while preserving in0-in7, which may be needed
792 * in case a system call gets restarted.
794 ENTRY(invoke_schedule)
795 UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8))
796 alloc loc1=ar.pfs,8,2,0,0
800 br.call.sptk.few rp=schedule
801 .ret14: mov ar.pfs=loc1
// Calls ia64_do_signal(oldset=0, &sigscratch, in_syscall) with pSys /
// pNonSys (set by the caller) deciding the in_syscall flag.  The
// new-unwind path uses a small scratch area on sp for ar.unat; the old
// path saves/restores a full switch_stack instead.  Intermediate source
// lines are missing in this excerpt.
807 // Setup stack and call ia64_do_signal. Note that pSys and pNonSys need to
808 // be set up by the caller. We declare 8 input registers so the system call
809 // args get preserved, in case we need to restart a system call.
811 ENTRY(handle_signal_delivery)
812 #ifdef CONFIG_IA64_NEW_UNWIND
813 .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
814 alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart!
816 mov loc0=rp // save return address
817 mov out0=0 // there is no "oldset"
818 adds out1=0,sp // out1=&sigscratch
819 (pSys) mov out2=1 // out2==1 => we're in a syscall
821 (pNonSys) mov out2=0 // out2==0 => not a syscall
823 .spillpsp ar.unat, 16 // (note that offset is relative to psp+0x10!)
824 st8 [sp]=r9,-16 // allocate space for ar.unat and save it
826 br.call.sptk.few rp=ia64_do_signal
828 adds sp=16,sp // pop scratch stack space
830 ld8 r9=[sp] // load new unat from sw->caller_unat
836 #else /* !CONFIG_IA64_NEW_UNWIND */
838 alloc r16=ar.pfs,8,0,3,0 // preserve all eight input regs in case of syscall restart!
842 mov out0=0 // there is no "oldset"
843 adds out1=16,sp // out1=&sigscratch
844 .pred.rel.mutex pSys, pNonSys
845 (pSys) mov out2=1 // out2==1 => we're in a syscall
846 (pNonSys) mov out2=0 // out2==0 => not a syscall
847 br.call.sptk.few rp=ia64_do_signal
848 .ret16: // restore the switch stack (ptrace may have modified it)
849 DO_LOAD_SWITCH_STACK( )
851 #endif /* !CONFIG_IA64_NEW_UNWIND */
852 END(handle_signal_delivery)
// Wrapper around ia64_rt_sigsuspend(set, sigsetsize, &sigscratch);
// mirrors handle_signal_delivery's two unwind variants.  Intermediate
// source lines are missing in this excerpt.
854 GLOBAL_ENTRY(sys_rt_sigsuspend)
855 #ifdef CONFIG_IA64_NEW_UNWIND
856 .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
857 alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart!
859 mov loc0=rp // save return address
861 mov out1=in1 // sigsetsize
862 adds out2=0,sp // out2=&sigscratch
865 .spillpsp ar.unat, 16 // (note that offset is relative to psp+0x10!)
866 st8 [sp]=r9,-16 // allocate space for ar.unat and save it
868 br.call.sptk.few rp=ia64_rt_sigsuspend
870 adds sp=16,sp // pop scratch stack space
872 ld8 r9=[sp] // load new unat from sw->caller_unat
878 #else /* !CONFIG_IA64_NEW_UNWIND */
879 UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2))
880 alloc r16=ar.pfs,2,0,3,0
885 mov out1=in1 // sigsetsize
886 adds out2=16,sp // out2=&sigscratch
887 br.call.sptk.many rp=ia64_rt_sigsuspend
888 .ret18: // restore the switch stack (ptrace may have modified it)
889 DO_LOAD_SWITCH_STACK( )
891 #endif /* !CONFIG_IA64_NEW_UNWIND */
892 END(sys_rt_sigsuspend)
// Wrapper around ia64_rt_sigreturn(&sigscratch).  Marks itself as not a
// normal syscall via pNonSys so ia64_leave_kernel restores cr.ifs; the
// old-unwind path allocates a dummy switch_stack frame around the call.
// Intermediate source lines are missing in this excerpt.
894 ENTRY(sys_rt_sigreturn)
895 #ifdef CONFIG_IA64_NEW_UNWIND
896 .regstk 0,0,3,0 // inherited from gate.s:invoke_sighandler()
897 PT_REGS_UNWIND_INFO(0)
902 cmp.eq pNonSys,p0=r0,r0 // sigreturn isn't a normal syscall...
904 adds out0=16,sp // out0 = &sigscratch
905 br.call.sptk.few rp=ia64_rt_sigreturn
906 .ret19: adds sp=16,sp // doesn't drop pt_regs, so don't mark it as restoring sp!
907 PT_REGS_UNWIND_INFO(0) // instead, create a new body section with the smaller frame
909 ld8 r9=[sp] // load new ar.unat
914 #else /* !CONFIG_IA64_NEW_UNWIND */
915 .regstk 0,0,3,0 // inherited from gate.s:invoke_sighandler()
916 PT_REGS_UNWIND_INFO(0)
918 UNW(.fframe IA64_PT_REGS_SIZE+IA64_SWITCH_STACK_SIZE)
919 UNW(.spillsp rp, PT(CR_IIP)+IA64_SWITCH_STACK_SIZE)
920 UNW(.spillsp ar.pfs, PT(CR_IFS)+IA64_SWITCH_STACK_SIZE)
921 UNW(.spillsp ar.unat, PT(AR_UNAT)+IA64_SWITCH_STACK_SIZE)
922 UNW(.spillsp pr, PT(PR)+IA64_SWITCH_STACK_SIZE)
923 adds sp=-IA64_SWITCH_STACK_SIZE,sp
924 cmp.eq pNonSys,p0=r0,r0 // sigreturn isn't a normal syscall...
928 adds out0=16,sp // out0 = &sigscratch
929 br.call.sptk.few rp=ia64_rt_sigreturn
930 .ret20: adds r3=IA64_SWITCH_STACK_CALLER_UNAT_OFFSET+16,sp
932 ld8 r9=[r3] // load new ar.unat
935 PT_REGS_UNWIND_INFO(0)
936 adds sp=IA64_SWITCH_STACK_SIZE,sp // drop (dummy) switch-stack frame
939 #endif /* !CONFIG_IA64_NEW_UNWIND */
940 END(sys_rt_sigreturn)
// Calls ia64_handle_unaligned (the interrupt vector table set up the
// stack frame), then restores the switch stack and returns via rp to
// ia64_leave_kernel.  Intermediate source lines are missing.
942 GLOBAL_ENTRY(ia64_prepare_handle_unaligned)
944 // r16 = fake ar.pfs, we simply need to make sure
945 // privilege is still 0
947 PT_REGS_UNWIND_INFO(0)
951 br.call.sptk.few rp=ia64_handle_unaligned // stack frame setup in ivt
953 DO_LOAD_SWITCH_STACK(PT_REGS_UNWIND_INFO(0))
954 br.cond.sptk.many rp // goes to ia64_leave_kernel
955 END(ia64_prepare_handle_unaligned)
// unw_init_running(callback, arg): builds a switch_stack plus an
// unw_frame_info scratch area on the stack, initializes the frame info
// via unw_init_frame_info, then invokes the callback (through b6,
// saving/restoring gp around the indirect call).  Intermediate source
// lines are missing in this excerpt.
957 #ifdef CONFIG_IA64_NEW_UNWIND
960 // unw_init_running(void (*callback)(info, arg), void *arg)
962 # define EXTRA_FRAME_SIZE ((UNW_FRAME_INFO_SIZE+15)&~15)
964 GLOBAL_ENTRY(unw_init_running)
965 .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
966 alloc loc1=ar.pfs,2,3,3,0
974 .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
975 .fframe IA64_SWITCH_STACK_SIZE+EXTRA_FRAME_SIZE
976 SWITCH_STACK_SAVES(EXTRA_FRAME_SIZE)
977 adds sp=-EXTRA_FRAME_SIZE,sp
980 adds out0=16,sp // &info
981 mov out1=r13 // current
982 adds out2=16+EXTRA_FRAME_SIZE,sp // &switch_stack
983 br.call.sptk.few rp=unw_init_frame_info
984 1: adds out0=16,sp // &info
986 mov loc2=gp // save gp across indirect function call
990 br.call.sptk.few rp=b6 // invoke the callback function
991 1: mov gp=loc2 // restore gp
993 // For now, we don't allow changing registers from within
994 // unw_init_running; if we ever want to allow that, we'd
995 // have to do a load_switch_stack here:
997 adds sp=IA64_SWITCH_STACK_SIZE+EXTRA_FRAME_SIZE,sp
1002 END(unw_init_running)
// System call dispatch table, indexed by (r15 - 1024); the // NNNN
// comments mark every fifth IA64 syscall number.  Entry 0 must remain
// sys_ni_syscall (see ivt.S).  Trailing ia64_ni_syscall entries pad the
// table for future syscalls.  Intermediate source lines are missing in
// this excerpt.
1008 .globl sys_call_table
1010 data8 sys_ni_syscall // This must be sys_ni_syscall! See ivt.S.
1011 data8 sys_exit // 1025
1016 data8 sys_creat // 1030
1021 data8 sys_fchdir // 1035
1026 data8 sys_lseek // 1040
1031 data8 sys_setuid // 1045
1036 data8 sys_sync // 1050
1041 data8 sys_mkdir // 1055
1046 data8 ia64_brk // 1060
1051 data8 sys_ioctl // 1065
1056 data8 sys_dup2 // 1070
1061 data8 sys_getresgid // 1075
1066 data8 sys_setpgid // 1080
1069 data8 sys_sethostname
1071 data8 sys_getrlimit // 1085
1073 data8 sys_gettimeofday
1074 data8 sys_settimeofday
1076 data8 sys_poll // 1090
1081 data8 sys_swapoff // 1095
1086 data8 sys_fchown // 1100
1087 data8 ia64_getpriority
1088 data8 sys_setpriority
1091 data8 sys_ioperm // 1105
1096 data8 sys_msgsnd // 1110
1101 data8 sys_shmdt // 1115
1106 data8 ia64_oldstat // 1120
1111 data8 sys_vm86 // 1125
1115 data8 sys_setdomainname
1116 data8 sys_newuname // 1130
1118 data8 ia64_create_module
1119 data8 sys_init_module
1120 data8 sys_delete_module
1121 data8 sys_get_kernel_syms // 1135
1122 data8 sys_query_module
1126 data8 sys_personality // 1140
1127 data8 ia64_ni_syscall // sys_afs_syscall
1131 data8 sys_flock // 1145
1136 data8 sys_sysctl // 1150
1141 data8 sys_mprotect // 1155
1145 data8 sys_munlockall
1146 data8 sys_sched_getparam // 1160
1147 data8 sys_sched_setparam
1148 data8 sys_sched_getscheduler
1149 data8 sys_sched_setscheduler
1150 data8 sys_sched_yield
1151 data8 sys_sched_get_priority_max // 1165
1152 data8 sys_sched_get_priority_min
1153 data8 sys_sched_rr_get_interval
1155 data8 sys_nfsservctl
1156 data8 sys_prctl // 1170
1157 data8 sys_getpagesize
1159 data8 sys_pciconfig_read
1160 data8 sys_pciconfig_write
1161 data8 sys_perfmonctl // 1175
1162 data8 sys_sigaltstack
1163 data8 sys_rt_sigaction
1164 data8 sys_rt_sigpending
1165 data8 sys_rt_sigprocmask
1166 data8 sys_rt_sigqueueinfo // 1180
1167 data8 sys_rt_sigreturn
1168 data8 sys_rt_sigsuspend
1169 data8 sys_rt_sigtimedwait
1171 data8 sys_capget // 1185
1174 data8 sys_ni_syscall // sys_getpmsg (STREAMS)
1175 data8 sys_ni_syscall // sys_putpmsg (STREAMS)
1176 data8 sys_socket // 1190
1181 data8 sys_getsockname // 1195
1182 data8 sys_getpeername
1183 data8 sys_socketpair
1186 data8 sys_recv // 1200
1189 data8 sys_setsockopt
1190 data8 sys_getsockopt
1191 data8 sys_sendmsg // 1205
1193 data8 sys_pivot_root
1196 data8 sys_newstat // 1210
1200 data8 sys_getdents64
1201 data8 ia64_ni_syscall // 1215
1202 data8 ia64_ni_syscall
1203 data8 ia64_ni_syscall
1204 data8 ia64_ni_syscall
1205 data8 ia64_ni_syscall
1206 data8 ia64_ni_syscall // 1220
1207 data8 ia64_ni_syscall
1208 data8 ia64_ni_syscall
1209 data8 ia64_ni_syscall
1210 data8 ia64_ni_syscall
1211 data8 ia64_ni_syscall // 1225
1212 data8 ia64_ni_syscall
1213 data8 ia64_ni_syscall
1214 data8 ia64_ni_syscall
1215 data8 ia64_ni_syscall
1216 data8 ia64_ni_syscall // 1230
1217 data8 ia64_ni_syscall
1218 data8 ia64_ni_syscall
1219 data8 ia64_ni_syscall
1220 data8 ia64_ni_syscall
1221 data8 ia64_ni_syscall // 1235
1222 data8 ia64_ni_syscall
1223 data8 ia64_ni_syscall
1224 data8 ia64_ni_syscall
1225 data8 ia64_ni_syscall
1226 data8 ia64_ni_syscall // 1240
1227 data8 ia64_ni_syscall
1228 data8 ia64_ni_syscall
1229 data8 ia64_ni_syscall
1230 data8 ia64_ni_syscall
1231 data8 ia64_ni_syscall // 1245
1232 data8 ia64_ni_syscall
1233 data8 ia64_ni_syscall
1234 data8 ia64_ni_syscall
1235 data8 ia64_ni_syscall
1236 data8 ia64_ni_syscall // 1250
1237 data8 ia64_ni_syscall
1238 data8 ia64_ni_syscall
1239 data8 ia64_ni_syscall
1240 data8 ia64_ni_syscall
1241 data8 ia64_ni_syscall // 1255
1242 data8 ia64_ni_syscall
1243 data8 ia64_ni_syscall
1244 data8 ia64_ni_syscall
1245 data8 ia64_ni_syscall
1246 data8 ia64_ni_syscall // 1260
1247 data8 ia64_ni_syscall
1248 data8 ia64_ni_syscall
1249 data8 ia64_ni_syscall
1250 data8 ia64_ni_syscall
1251 data8 ia64_ni_syscall // 1265
1252 data8 ia64_ni_syscall
1253 data8 ia64_ni_syscall
1254 data8 ia64_ni_syscall
1255 data8 ia64_ni_syscall
1256 data8 ia64_ni_syscall // 1270
1257 data8 ia64_ni_syscall
1258 data8 ia64_ni_syscall
1259 data8 ia64_ni_syscall
1260 data8 ia64_ni_syscall
1261 data8 ia64_ni_syscall // 1275
1262 data8 ia64_ni_syscall
1263 data8 ia64_ni_syscall
1264 data8 ia64_ni_syscall