2 * arch/s390/kernel/entry.S
3 * S390 low-level entry points.
6 * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
8 * Hartmut Penner (hp@de.ibm.com),
9 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
12 #include <linux/sys.h>
13 #include <linux/linkage.h>
14 #include <linux/config.h>
15 #include <asm/lowcore.h>
16 #include <asm/errno.h>
19 #include <asm/s390-regs-common.h>
23 * stack layout for the system_call stack entry
24 * Martin, please don't modify these back to hard-coded values --
25 * you know how bad I am at mental arithmetic (DJB), and it gives
26 * me grief when I modify the pt_regs
/*
 * Offsets into the kernel-stack save area (pt_regs plus extra words).
 * NOTE(review): several original lines are missing from this view
 * (embedded numbering skips, e.g. 29, 57, 59); the #else/#endif of the
 * CONFIG_REMOTE_DEBUG conditional are among them -- confirm against the
 * full file before editing this region.
 */
28 SP_PTREGS = STACK_FRAME_OVERHEAD
30 SP_R0 = (SP_PSW+PSW_MASK_SIZE+PSW_ADDR_SIZE)
31 SP_R1 = (SP_R0+GPR_SIZE)
32 SP_R2 = (SP_R1+GPR_SIZE)
33 SP_R3 = (SP_R2+GPR_SIZE)
34 SP_R4 = (SP_R3+GPR_SIZE)
35 SP_R5 = (SP_R4+GPR_SIZE)
36 SP_R6 = (SP_R5+GPR_SIZE)
37 SP_R7 = (SP_R6+GPR_SIZE)
38 SP_R8 = (SP_R7+GPR_SIZE)
39 SP_R9 = (SP_R8+GPR_SIZE)
40 SP_RA = (SP_R9+GPR_SIZE)
41 SP_RB = (SP_RA+GPR_SIZE)
42 SP_RC = (SP_RB+GPR_SIZE)
43 SP_RD = (SP_RC+GPR_SIZE)
44 SP_RE = (SP_RD+GPR_SIZE)
45 SP_RF = (SP_RE+GPR_SIZE)
46 SP_AREGS = (SP_RF+GPR_SIZE)
47 SP_ORIG_R2 = (SP_AREGS+(NUM_ACRS*ACR_SIZE))
48 SP_TRAP = (SP_ORIG_R2+GPR_SIZE)
49 #if CONFIG_REMOTE_DEBUG
50 SP_CRREGS = (SP_TRAP+4)
51 /* fpu registers are saved & restored by the gdb stub itself */
52 SP_FPC = (SP_CRREGS+(NUM_CRS*CR_SIZE))
53 SP_FPRS = (SP_FPC+FPC_SIZE+FPC_PAD_SIZE)
54 /* SP_PGM_OLD_ILC etc are not part of pt_regs & they are not
55 defined in ptrace.h but space is needed for this too */
56 SP_PGM_OLD_ILC= (SP_FPRS+(NUM_FPRS*FPR_SIZE))
58 SP_PGM_OLD_ILC= (SP_TRAP+4)
60 SP_SVC_STEP = (SP_PGM_OLD_ILC+4)
61 SP_SIZE = (SP_SVC_STEP+4)
63 * these defines are offsets into the thread_struct
66 _TSS_FPRS = (_TSS_PTREGS+8)
67 _TSS_AR2 = (_TSS_FPRS+136)
68 _TSS_AR4 = (_TSS_AR2+4)
69 _TSS_KSP = (_TSS_AR4+4)
70 _TSS_USERSEG = (_TSS_KSP+4)
71 _TSS_ERROR = (_TSS_USERSEG+4)
72 _TSS_PROT = (_TSS_ERROR+4)
73 _TSS_TRAP = (_TSS_PROT+4)
74 _TSS_MM = (_TSS_TRAP+4)
75 _TSS_PER = (_TSS_MM+8)
78 * these are offsets into the task-struct.
86 /* PSW related defines */
93 /* some code left lying around in case we need a
94 * printk for debugging purposes
96 sysc_printk: .long printk
97 sysc_msg: .string "<2>r15 %X\n"
/* NOTE(review): the debug snippet below is only partially visible here;
   the actual call to printk is on lines not shown in this view. */
105 l %r9,sysc_printk-sysc_lit(%r13)
106 la %r2,sysc_msg-sysc_lit(%r13)
113 * Register usage in interrupt handlers:
114 * R9 - pointer to current task structure
115 * R13 - pointer to literal pool
116 * R14 - return register for function calls
117 * R15 - kernel stack pointer
120 #define SAVE_ALL(psworg) \
121 st %r15,__LC_SAVE_AREA ; \
122 tm psworg+1,0x01 ; /* test problem state bit */ \
123 jz 0f ; /* skip stack setup save */ \
124 l %r15,__LC_KERNEL_STACK ; /* problem state -> load ksp */ \
125 0: ahi %r15,-SP_SIZE ; /* make room for registers & psw */ \
127 sll %r15,3 ; /* align stack pointer to 8 */ \
128 stm %r0,%r14,SP_R0(%r15) ; /* store gprs 0-14 to kernel stack */ \
129 st %r2,SP_ORIG_R2(%r15) ; /* store original content of gpr 2 */ \
130 mvc SP_RF(4,%r15),__LC_SAVE_AREA ; /* move R15 to stack */ \
131 stam %a0,%a15,SP_AREGS(%r15) ; /* store access registers to kst. */ \
132 mvc SP_PSW(8,%r15),psworg ; /* move user PSW to stack */ \
133 lhi %r0,psworg ; /* store trap indication */ \
134 st %r0,SP_TRAP(%r15) ; \
135 xc 0(4,%r15),0(%r15) ; /* clear back chain */ \
136 tm psworg+1,0x01 ; /* kmod.c wishes the set_fs & gs */ \
137 jz 1f ; /* to work across syscalls */ \
139 sar %a2,%r0 ; /* set ac.reg. 2 to primary space */ \
141 sar %a4,%r0 ; /* set access reg. 4 to home space */ \
144 #define RESTORE_ALL \
145 mvc __LC_RETURN_PSW(8,0),SP_PSW(%r15) ; /* move user PSW to lowcore */ \
146 lam %a0,%a15,SP_AREGS(%r15) ; /* load the access registers */ \
147 lm %r0,%r15,SP_R0(%r15) ; /* load gprs 0-15 of user */ \
148 ni __LC_RETURN_PSW+1(0),0xfd ; /* clear wait state bit */ \
149 lpsw __LC_RETURN_PSW /* back to caller */
151 #define GET_CURRENT /* load pointer to task_struct to R9 */ \
157 * Scheduler resume function, called by switch_to
158 * gpr2 = (thread_struct *) prev->tss
159 * gpr3 = (thread_struct *) next->tss
/* NOTE(review): resume's entry label and prologue (orig. lines 160-164)
   are not visible in this view. */
165 l %r4,_TSS_PTREGS(%r3) # get pt_regs pointer of next task
166 tm SP_PSW-SP_PTREGS(%r4),0x40 # is the new process using per ?
167 jz RES_DN1 # if not we're fine
168 stctl %r9,%r11,24(%r15) # We are using per stuff
169 clc _TSS_PER(12,%r3),24(%r15) # per control regs 9-11 unchanged ?
170 je RES_DN1 # we got away without bashing TLB's
171 lctl %c9,%c11,_TSS_PER(%r3) # Nope we didn't
173 stm %r6,%r15,24(%r15) # store resume registers of prev task
174 st %r15,_TSS_KSP(%r2) # store kernel stack ptr to prev->tss.ksp
177 l %r15,_TSS_KSP(%r3) # load kernel stack ptr from next->tss.ksp
181 st %r1,__LC_KERNEL_STACK # __LC_KERNEL_STACK = new kernel stack
182 stam %a2,%a2,_TSS_AR2(%r2) # store kernel access reg. 2
183 stam %a4,%a4,_TSS_AR4(%r2) # store kernel access reg. 4
184 lam %a2,%a2,_TSS_AR2(%r3) # load kernel access reg. 2
185 lam %a4,%a4,_TSS_AR4(%r3) # load kernel access reg. 4
186 lr %r2,%r0 # return task_struct of last task
187 lm %r6,%r15,24(%r15) # load resume registers of next task
191 * SVC interrupt handler routine. System calls are synchronous events and
192 * are executed with interrupts enabled.
196 sysc_do_signal: .long do_signal
197 sysc_do_softirq: .long do_softirq
198 sysc_schedule: .long schedule
199 sysc_trace: .long syscall_trace
201 sysc_schedtail: .long schedule_tail
203 sysc_clone: .long sys_clone
204 sysc_fork: .long sys_fork
205 sysc_vfork: .long sys_vfork
206 sysc_sigreturn: .long sys_sigreturn
207 sysc_rt_sigreturn: .long sys_rt_sigreturn
208 sysc_execve: .long sys_execve
209 sysc_sigsuspend: .long sys_sigsuspend
210 sysc_rt_sigsuspend: .long sys_rt_sigsuspend
215 XC SP_SVC_STEP(4,%r15),SP_SVC_STEP(%r15)
218 ahi %r13,sysc_lit-. # setup base pointer R13 to sysc_lit
219 slr %r8,%r8 # gpr 8 is call save (-> tracesys)
220 ic %r8,0x8B # get svc number from lowcore
221 stosm 24(%r15),0x03 # reenable interrupts
222 GET_CURRENT # load pointer to task_struct to R9
224 l %r8,sys_call_table-sysc_lit(8,%r13) # get address of system call
225 tm flags+3(%r9),0x20 # PF_TRACESYS
227 basr %r14,%r8 # call sys_xxxx
228 st %r2,SP_R2(%r15) # store return value (change R2 on stack)
229 # ATTENTION: check sys_execve_glue before
230 # changing anything here !!
233 GET_CURRENT # load pointer to task_struct to R9
234 tm SP_PSW+1(%r15),0x01 # returning to user ?
235 jno sysc_leave # no-> skip bottom half, resched & signal
237 # check, if bottom-half has to be done
239 l %r0,__LC_IRQ_STAT # get softirq_active
240 n %r0,__LC_IRQ_STAT+4 # and it with softirq_mask
241 jnz sysc_handle_bottom_half
243 # check, if reschedule is needed
246 icm %r0,15,need_resched(%r9) # get need_resched from task_struct
248 icm %r0,15,sigpending(%r9) # get sigpending from task_struct
249 jnz sysc_signal_return
251 icm %r0,15,SP_SVC_STEP(%r15) # get single-step flag SP_SVC_STEP
253 stnsm 24(%r15),disable # disable I/O and ext. interrupts
257 # call do_signal before return
260 la %r2,SP_PTREGS(%r15) # load pt_regs
261 sr %r3,%r3 # clear *oldset
262 l %r1,sysc_do_signal-sysc_lit(%r13)
263 la %r14,sysc_leave-sysc_lit(%r13)
264 br %r1 # return point is sysc_leave
267 # call trace before and after sys_call
270 l %r1,sysc_trace-sysc_lit(%r13)
272 st %r2,SP_R2(%r15) # give sysc_trace an -ENOSYS retval
274 lm %r3,%r6,SP_R3(%r15) # reload syscall args gprs 3-6 from stack
275 l %r2,SP_ORIG_R2(%r15) # reload original gpr 2 (first arg)
276 basr %r14,%r8 # call sys_xxx
277 st %r2,SP_R2(%r15) # store return value
278 l %r1,sysc_trace-sysc_lit(%r13)
279 la %r14,sysc_return-sysc_lit(%r13)
280 br %r1 # return point is sysc_return
284 # call do_softirq and return from syscall, if interrupt-level
287 sysc_handle_bottom_half:
288 l %r1,sysc_do_softirq-sysc_lit(%r13)
289 la %r14,sysc_return_bh-sysc_lit(%r13)
290 br %r1 # call do_softirq
293 # call schedule with sysc_return as return-address
296 l %r1,sysc_schedule-sysc_lit(%r13)
297 la %r14,sysc_return-sysc_lit(%r13)
298 br %r1 # call scheduler, return to sysc_return
301 # a new process exits the kernel with ret_from_fork
306 ahi %r13,sysc_lit-. # setup base pointer R13 to $SYSCDAT
307 GET_CURRENT # load pointer to task_struct to R9
308 stosm 24(%r15),0x03 # reenable interrupts
309 sr %r0,%r0 # child returns 0
310 st %r0,SP_R2(%r15) # store return value (change R2 on stack)
312 l %r1,sysc_schedtail-sysc_lit(%r13)
313 la %r14,sysc_return-sysc_lit(%r13)
314 br %r1 # call schedule_tail, return to sysc_return
320 # clone, fork, vfork, exec and sigreturn need glue,
321 # because they all expect pt_regs as parameter,
322 # but are called with different parameter.
323 # return-address is set up above
326 la %r2,SP_PTREGS(%r15) # load pt_regs
327 l %r1,sysc_clone-sysc_lit(%r13)
328 br %r1 # branch to sys_clone
331 la %r2,SP_PTREGS(%r15) # load pt_regs
332 l %r1,sysc_fork-sysc_lit(%r13)
333 br %r1 # branch to sys_fork
336 la %r2,SP_PTREGS(%r15) # load pt_regs
337 l %r1,sysc_vfork-sysc_lit(%r13)
338 br %r1 # branch to sys_vfork
341 la %r2,SP_PTREGS(%r15) # load pt_regs
342 l %r1,sysc_execve-sysc_lit(%r13)
343 lr %r12,%r14 # save return address
344 basr %r14,%r1 # call sys_execve
345 ltr %r2,%r2 # check if execve failed
346 bnz 0(%r12) # it did fail -> store result in gpr2
347 b 4(%r12) # SKIP ST 2,SP_R2(15) after BASR 14,8
348 # in system_call/sysc_tracesys
351 la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
352 l %r1,sysc_sigreturn-sysc_lit(%r13)
353 br %r1 # branch to sys_sigreturn
355 sys_rt_sigreturn_glue:
356 la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
357 l %r1,sysc_rt_sigreturn-sysc_lit(%r13)
358 br %r1 # branch to sys_rt_sigreturn
361 # sigsuspend and rt_sigsuspend need pt_regs as an additional
362 # parameter and they have to skip the store of %r2 into the
363 # user register %r2 because the return value was set in
364 # sigsuspend and rt_sigsuspend already and must not be overwritten!
368 lr %r5,%r4 # move mask back
369 lr %r4,%r3 # move history1 parameter
370 lr %r3,%r2 # move history0 parameter
371 la %r2,SP_PTREGS(%r15) # load pt_regs as first parameter
372 l %r1,sysc_sigsuspend-sysc_lit(%r13)
373 la %r14,4(%r14) # skip store of return value
374 br %r1 # branch to sys_sigsuspend
376 sys_rt_sigsuspend_glue:
377 lr %r4,%r3 # move sigsetsize parameter
378 lr %r3,%r2 # move unewset parameter
379 la %r2,SP_PTREGS(%r15) # load pt_regs as first parameter
380 l %r1,sysc_rt_sigsuspend-sysc_lit(%r13)
381 la %r14,4(%r14) # skip store of return value
382 br %r1 # branch to sys_rt_sigsuspend
/*
 * System call table: the syscall number indexes a 4-byte handler pointer.
 * NOTE(review): many table entries are missing from this view (embedded
 * original line numbers skip); the /* NNN *​/ markers label every 5th entry
 * in the full file.
 */
384 .globl sys_call_table
386 .long sys_ni_syscall /* 0 */
391 .long sys_open /* 5 */
393 .long sys_ni_syscall /* old waitpid syscall holder */
396 .long sys_unlink /* 10 */
397 .long sys_execve_glue
401 .long sys_chmod /* 15 */
403 .long sys_ni_syscall /* old break syscall holder */
404 .long sys_ni_syscall /* old stat syscall holder */
406 .long sys_getpid /* 20 */
411 .long sys_stime /* 25 */
414 .long sys_ni_syscall /* old fstat syscall holder */
416 .long sys_utime /* 30 */
417 .long sys_ni_syscall /* old stty syscall holder */
418 .long sys_ni_syscall /* old gtty syscall holder */
421 .long sys_ni_syscall /* 35 */ /* old ftime syscall holder */
426 .long sys_rmdir /* 40 */
430 .long sys_ni_syscall /* old prof syscall holder */
431 .long sys_brk /* 45 */
436 .long sys_getegid16 /* 50 */
439 .long sys_ni_syscall /* old lock syscall holder */
441 .long sys_fcntl /* 55 */
442 .long sys_ni_syscall /* old mpx syscall holder */
444 .long sys_ni_syscall /* old ulimit syscall holder */
445 .long sys_ni_syscall /* old uname syscall holder */
446 .long sys_umask /* 60 */
451 .long sys_getpgrp /* 65 */
454 .long sys_ni_syscall /* old sgetmask syscall holder */
455 .long sys_ni_syscall /* old ssetmask syscall holder */
456 .long sys_setreuid16 /* 70 */
458 .long sys_sigsuspend_glue
460 .long sys_sethostname
461 .long sys_setrlimit /* 75 */
464 .long sys_gettimeofday
465 .long sys_settimeofday
466 .long sys_getgroups16 /* 80 */
467 .long sys_setgroups16
468 .long sys_ni_syscall /* old select syscall holder */
470 .long sys_ni_syscall /* old lstat syscall holder */
471 .long sys_readlink /* 85 */
475 .long sys_ni_syscall /* old readdir syscall holder */
476 .long old_mmap /* 90 */
481 .long sys_fchown16 /* 95 */
482 .long sys_getpriority
483 .long sys_setpriority
484 .long sys_ni_syscall /* old profil syscall holder */
486 .long sys_fstatfs /* 100 */
491 .long sys_getitimer /* 105 */
495 .long sys_ni_syscall /* old uname syscall holder */
496 .long sys_ni_syscall /* 110 */ /* iopl for i386 */
498 .long sys_ni_syscall /* old "idle" system call */
499 .long sys_ni_syscall /* vm86old for i386 */
501 .long sys_swapoff /* 115 */
505 .long sys_sigreturn_glue
506 .long sys_clone_glue /* 120 */
507 .long sys_setdomainname
509 .long sys_ni_syscall /* modify_ldt for i386 */
511 .long sys_mprotect /* 125 */
512 .long sys_sigprocmask
513 .long sys_create_module
514 .long sys_init_module
515 .long sys_delete_module
516 .long sys_get_kernel_syms /* 130 */
521 .long sys_sysfs /* 135 */
522 .long sys_personality
523 .long sys_ni_syscall /* for afs_syscall */
526 .long sys_llseek /* 140 */
531 .long sys_readv /* 145 */
536 .long sys_mlock /* 150 */
540 .long sys_sched_setparam
541 .long sys_sched_getparam /* 155 */
542 .long sys_sched_setscheduler
543 .long sys_sched_getscheduler
544 .long sys_sched_yield
545 .long sys_sched_get_priority_max
546 .long sys_sched_get_priority_min /* 160 */
547 .long sys_sched_rr_get_interval
550 .long sys_setresuid16
551 .long sys_getresuid16 /* 165 */
552 .long sys_ni_syscall /* for vm86 */
553 .long sys_query_module
556 .long sys_setresgid16 /* 170 */
557 .long sys_getresgid16
559 .long sys_rt_sigreturn_glue
560 .long sys_rt_sigaction
561 .long sys_rt_sigprocmask /* 175 */
562 .long sys_rt_sigpending
563 .long sys_rt_sigtimedwait
564 .long sys_rt_sigqueueinfo
565 .long sys_rt_sigsuspend_glue
566 .long sys_pread /* 180 */
571 .long sys_capset /* 185 */
572 .long sys_sigaltstack
574 .long sys_ni_syscall /* streams1 */
575 .long sys_ni_syscall /* streams2 */
576 .long sys_vfork_glue /* 190 */
578 .long sys_ni_syscall /* FIXME: problem with sys_mmap2: 6 parms */
580 .long sys_ftruncate64
581 .long sys_stat64 /* 195 */
586 .long sys_getgid /* 200 */
591 .long sys_getgroups /* 205 */
596 .long sys_setresgid /* 210 */
601 .long sys_setfsuid /* 215 */
611 * Program check handler routine
615 pgm_handle_per: .long handle_per_exception
616 pgm_jump_table: .long pgm_check_table
617 pgm_sysc_ret: .long sysc_return
618 pgm_sysc_lit: .long sysc_lit
619 pgm_do_signal: .long do_signal
621 .globl pgm_check_handler
624 * First we need to check for a special case:
625 * Single stepping an instruction that disables the PER event mask will
626 * cause a PER event AFTER the mask has been set. Example: SVC or LPSW.
627 * For a single stepped SVC the program check handler gets control after
628 * the SVC new PSW has been loaded. But we want to execute the SVC first and
629 * then handle the PER event. Therefore we update the SVC old PSW to point
630 * to the pgm_check_handler and branch to the SVC handler after we checked
631 * if we have to load the kernel stack register.
632 * For every other possible cause for PER event without the PER mask set
633 * we just ignore the PER event (FIXME: is there anything we have to do
636 tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
637 jz pgm_sv # skip if not
638 tm __LC_PGM_OLD_PSW,0x40 # test if per event recording is on
639 jnz pgm_sv # skip if it is
640 # ok its one of the special cases, now we need to find out which one
641 clc __LC_PGM_OLD_PSW(8),__LC_SVC_NEW_PSW
643 # no interesting special case, ignore PER event
645 # it was a single stepped SVC that is causing all the trouble
648 mvi SP_SVC_STEP(%r15),1 # make SP_SVC_STEP nonzero
649 mvc SP_PGM_OLD_ILC(4,%r15),__LC_PGM_ILC # save program check information
650 j pgm_system_call # now do the svc
652 lh %r7,SP_PGM_OLD_ILC(%r15) # get ilc from stack
654 st %r0,SP_TRAP(%r15) # set new trap indicator
655 xc SP_SVC_STEP(4,%r15),SP_SVC_STEP(%r15)
657 ahi %r13,pgm_lit-. # setup base pointer
661 XC SP_SVC_STEP(4,%r15),SP_SVC_STEP(%r15)
663 ahi %r13,pgm_lit-. # setup base pointer R13 to $PGMDAT
664 lh %r7,__LC_PGM_ILC # load instruction length
666 lh %r8,__LC_PGM_INT_CODE # N.B. saved int code used later KEEP it
667 stosm 24(%r15),0x03 # reenable interrupts
670 nr %r3,%r0 # clear per-event-bit
671 je pgm_dn # none of Martin's exceptions occurred -> bypass
672 l %r9,pgm_jump_table-pgm_lit(%r13)
674 l %r9,0(%r3,%r9) # load address of handler routine
675 la %r2,SP_PTREGS(%r15) # address of register-save area
677 chi %r3,0x4 # protection-exception ?
679 l %r5,SP_PSW+4(15) # load psw addr
680 sr %r5,%r7 # subtract ilc from psw
681 st %r5,SP_PSW+4(15) # store corrected psw addr
682 pgm_go: basr %r14,%r9 # branch to interrupt-handler
684 nr %r8,%r0 # check for per exception
686 la %r2,SP_PTREGS(15) # address of register-save area
687 l %r9,pgm_handle_per-pgm_lit(%r13) # load adr. of per handler
688 l %r14,pgm_sysc_ret-pgm_lit(%r13) # load adr. of system return
689 l %r13,pgm_sysc_lit-pgm_lit(%r13)
690 br %r9 # branch to handle_per_exception
692 # the backend code is the same as for sys-call
695 l %r14,pgm_sysc_ret-pgm_lit(%r13)
696 l %r13,pgm_sysc_lit-pgm_lit(%r13)
700 * IO interrupt handler routine
704 io_do_IRQ: .long do_IRQ
705 io_schedule: .long schedule
706 io_do_signal: .long do_signal
707 io_do_softirq: .long do_softirq
709 .globl io_int_handler
713 ahi %r13,io_lit-. # setup base pointer R13 to $IODAT
714 la %r2,SP_PTREGS(%r15) # address of register-save area
716 icm %r3,%r3,__LC_SUBCHANNEL_NR # load subchannel nr & extend to int
717 l %r4,__LC_IO_INT_PARM # load interruption parm
718 l %r9,io_do_IRQ-io_lit(%r13) # load address of do_IRQ
719 basr %r14,%r9 # branch to standard irq handler
722 GET_CURRENT # load pointer to task_struct to R9
723 tm SP_PSW+1(%r15),0x01 # returning to user ?
724 jz io_leave # no-> skip resched & signal
725 stosm 24(%r15),0x03 # reenable interrupts
727 # check, if bottom-half has to be done
729 l %r0,__LC_IRQ_STAT # get softirq_active
730 n %r0,__LC_IRQ_STAT+4 # and it with softirq_mask
731 jnz io_handle_bottom_half
734 # check, if reschedule is needed
736 icm %r0,15,need_resched(%r9) # get need_resched from task_struct
738 icm %r0,15,sigpending(%r9) # get sigpending from task_struct
741 stnsm 24(%r15),disable # disable I/O and ext. interrupts
745 # call do_softirq and return from syscall, if interrupt-level
748 io_handle_bottom_half:
749 l %r1,io_do_softirq-io_lit(%r13)
750 la %r14,io_return_bh-io_lit(%r13)
751 br %r1 # call do_softirq
754 # call schedule with io_return as return-address
757 l %r1,io_schedule-io_lit(%r13)
758 la %r14,io_return-io_lit(%r13)
759 br %r1 # call scheduler, return to io_return
762 # call do_signal before return
765 la %r2,SP_PTREGS(%r15) # load pt_regs
766 sr %r3,%r3 # clear *oldset
767 l %r1,io_do_signal-io_lit(%r13)
768 la %r14,io_leave-io_lit(%r13)
769 br %r1 # return point is io_leave
772 * External interrupt handler routine
776 ext_timer_int: .long do_timer_interrupt
778 ext_call_int: .long do_ext_call_interrupt
781 ext_hwc_int: .long do_hwc_interrupt
784 ext_mdisk_int: .long do_mdisk_interrupt
787 ext_iucv_int: .long do_iucv_interrupt
789 ext_io_lit: .long io_lit
790 ext_io_return: .long io_return
792 .globl ext_int_handler
796 ahi %r13,ext_lit-. # setup base pointer R13 to $EXTDAT
797 la %r2,SP_PTREGS(%r15) # address of register-save area
798 lh %r3,__LC_EXT_INT_CODE # error code
800 chi %r3,0x1202 # EXTERNAL_CALL
802 l %r9,ext_call_int-ext_lit(%r13) # load ext_call_interrupt
803 l %r14,ext_io_return-ext_lit(%r13)
804 l %r13,ext_io_lit-ext_lit(%r13)
805 br %r9 # branch to ext call handler
808 chi %r3,0x1004 # CPU_TIMER
810 l %r9,ext_timer_int-ext_lit(%r13) # load timer_interrupt
811 l %r14,ext_io_return-ext_lit(%r13)
812 l %r13,ext_io_lit-ext_lit(%r13)
813 br %r9 # branch to timer handler
816 chi %r3,0x2401 # HWC interrupt
818 l %r9,ext_hwc_int-ext_lit(%r13) # load addr. of hwc routine
819 l %r14,ext_io_return-ext_lit(%r13)
820 l %r13,ext_io_lit-ext_lit(%r13)
821 br %r9 # branch to hwc handler
825 chi %r3,0x2603 # diag 250 (VM) interrupt
827 l %r9,ext_mdisk_int-ext_lit(%r13)
828 l %r14,ext_io_return-ext_lit(%r13)
829 l %r13,ext_io_lit-ext_lit(%r13)
830 br %r9 # branch to mdisk handler
834 chi %r3,0x4000 # iucv interrupt
836 l %r9,ext_iucv_int-ext_lit(%r13)
837 l %r14,ext_io_return-ext_lit(%r13)
838 l %r13,ext_io_lit-ext_lit(%r13)
839 br %r9 # branch to iucv handler
843 l %r14,ext_io_return-ext_lit(%r13)
844 l %r13,ext_io_lit-ext_lit(%r13)
845 br %r14 # use backend code of io_int_handler
848 * Machine check handler routines
851 mcck_crw_pending: .long do_crw_pending
854 .globl mcck_int_handler
858 ahi %r13,mcck_lit-. # setup base pointer R13 to $MCCKDAT
859 tm __LC_MCCK_CODE+1,0x40 # channel report pending ? (handler below calls do_crw_pending)
861 l %r1,mcck_crw_pending-mcck_lit(%r13)
862 basr %r14,%r1 # call do_crw_pending
869 * Restart interruption handler, kick starter for additional CPUs
/* NOTE(review): two restart_int_handler definitions appear below; their
   CONFIG_SMP #ifdef/#else/#endif guards are on lines not visible here. */
871 .globl restart_int_handler
873 l %r15,__LC_KERNEL_STACK # load ksp
874 lctl %c0,%c15,__LC_CREGS_SAVE_AREA # get new ctl regs
875 lam %a0,%a15,__LC_AREGS_SAVE_AREA
876 stosm 0(%r15),daton # now we can turn dat on
877 lm %r6,%r15,24(%r15) # load registers from clone
879 .long start_secondary
882 br %r14 # branch to start_secondary
885 * If we do not run with SMP enabled, let the new CPU crash ...
887 .globl restart_int_handler
891 lpsw restart_crash-restart_base(%r1)
/* PSW loaded by the lpsw above: presumably a disabled-wait PSW that
   halts the CPU -- confirm mask bits against the architecture. */
894 .long 0x000a0000,0x00000000