1 /* $Id: entry.S,v 1.117 2000/07/11 02:21:12 davem Exp $
2 * arch/sparc64/kernel/entry.S: Sparc64 trap low-level entry points.
4 * Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
6 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
7 * Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
10 #include <linux/config.h>
11 #include <linux/errno.h>
16 #include <asm/ptrace.h>
18 #include <asm/signal.h>
19 #include <asm/pgtable.h>
20 #include <asm/processor.h>
21 #include <asm/visasm.h>
23 /* #define SYSCALL_TRACING */
27 #define NR_SYSCALLS 256 /* Each OS is different... */
/* Runtime-patched stub used by the dtlb_backend VPTE miss path.
 * Boot code rewrites the sethi/or pair below with the real VPTE base,
 * then control continues at sparc64_kpte_continue.
 */
32 .globl sparc64_vpte_patchme1
33 .globl sparc64_vpte_patchme2
35 sparc64_vpte_patchme1:
36 sethi %hi(0), %g5 ! This has to be patched
37 sparc64_vpte_patchme2:
38 or %g5, %lo(0), %g5 ! This is patched too
/* Branch with the delay slot doing the final PMD offset doubling. */
39 ba,pt %xcc, sparc64_kpte_continue ! Part of dtlb_backend
40 add %g1, %g1, %g1 ! Finish PMD offset adjustment
/* FPU-disabled trap: lazily restore the task's FPU state.
 * NOTE(review): this listing is an excerpt -- embedded line numbers show
 * gaps, so several instructions (labels, branches, delay slots) are not
 * visible here; comments below describe only what the visible code shows.
 * thread.fpsaved is consulted to decide which half (lower %f0-%f31 = DL,
 * upper %f32-%f63 = DU) of the FP register file must be reloaded.
 */
42 /* This is trivial with the new code... */
45 ldub [%g6 + AOFF_task_thread + AOFF_thread_fpsaved], %g5 ! Load Group
46 sethi %hi(TSTATE_PEF), %g4 ! IEU0
/* Enable the FPU so we can touch %f registers at all. */
47 wr %g0, FPRS_FEF, %fprs ! LSU Group+4bubbles
48 andcc %g5, FPRS_FEF, %g0 ! IEU1 Group
49 be,a,pt %icc, 1f ! CTI
51 ldub [%g6 + AOFF_task_thread + AOFF_thread_gsr], %g7 ! Load Group
/* Dispatch on which halves of the FP file were saved. */
52 1: andcc %g5, FPRS_DL, %g0 ! IEU1
55 andcc %g5, FPRS_DU, %g0 ! IEU1 Group
86 b,pt %xcc, fpdis_exit2
/* Path 1: only the upper half (%f32-%f63) needs restoring.
 * Temporarily switch secondary context to nucleus (store %g0) so the
 * ASI_BLK_S block loads hit kernel mappings; old context saved in %g5.
 */
88 1: mov SECONDARY_CONTEXT, %g3
89 add %g6, AOFF_task_fpregs + 0x80, %g1
92 ldxa [%g3] ASI_DMMU, %g5
93 add %g6, AOFF_task_fpregs + 0xc0, %g2
94 stxa %g0, [%g3] ASI_DMMU
98 membar #StoreLoad | #LoadLoad
99 ldda [%g1] ASI_BLK_S, %f32 ! grrr, where is ASI_BLK_NUCLEUS 8-(
100 ldda [%g2] ASI_BLK_S, %f48
111 b,pt %xcc, fpdis_exit
/* Path 2: only the lower half (%f0-%f31) needs restoring.  The upper
 * half is filled with dummy faddd/fmuld results merely to initialize
 * the registers to a defined value (content is irrelevant).
 */
113 2: andcc %g5, FPRS_DU, %g0
116 mov SECONDARY_CONTEXT, %g3
118 ldxa [%g3] ASI_DMMU, %g5
119 add %g6, AOFF_task_fpregs, %g1
120 stxa %g0, [%g3] ASI_DMMU
121 add %g6, AOFF_task_fpregs + 0x40, %g2
122 faddd %f32, %f34, %f36
123 fmuld %f32, %f34, %f38
125 membar #StoreLoad | #LoadLoad
126 ldda [%g1] ASI_BLK_S, %f0 ! grrr, where is ASI_BLK_NUCLEUS 8-(
127 ldda [%g2] ASI_BLK_S, %f16
128 faddd %f32, %f34, %f40
129 fmuld %f32, %f34, %f42
130 faddd %f32, %f34, %f44
131 fmuld %f32, %f34, %f46
132 faddd %f32, %f34, %f48
133 fmuld %f32, %f34, %f50
134 faddd %f32, %f34, %f52
135 fmuld %f32, %f34, %f54
136 faddd %f32, %f34, %f56
137 fmuld %f32, %f34, %f58
138 faddd %f32, %f34, %f60
139 fmuld %f32, %f34, %f62
140 b,pt %xcc, fpdis_exit
/* Path 3: restore the full FP register file (both halves).
 * %g2 presumably holds the 0x40 block stride -- set in a line not
 * visible in this excerpt; TODO confirm against the full file.
 */
142 3: mov SECONDARY_CONTEXT, %g3
143 add %g6, AOFF_task_fpregs, %g1
144 ldxa [%g3] ASI_DMMU, %g5
146 stxa %g0, [%g3] ASI_DMMU
148 membar #StoreLoad | #LoadLoad
149 ldda [%g1] ASI_BLK_S, %f0 ! grrr, where is ASI_BLK_NUCLEUS 8-(
150 ldda [%g1 + %g2] ASI_BLK_S, %f16
152 ldda [%g1] ASI_BLK_S, %f32
153 ldda [%g1 + %g2] ASI_BLK_S, %f48
/* Common exit: restore the saved secondary context and the task %fsr. */
156 stxa %g5, [%g3] ASI_DMMU
160 ldx [%g6 + AOFF_task_thread + AOFF_thread_xfsr], %fsr
162 or %g3, %g4, %g3 ! anal...
164 wr %g0, FPRS_FEF, %fprs ! clean DU/DL bits
/* FPU state save path: dump the live FP registers and %fsr/%gsr into
 * the task struct, guided by the DL/DU dirty bits in %g1.
 * NOTE(review): excerpt -- branch targets and some setup lines (e.g.
 * the 0x40 stride load into %g3) are missing from this view.
 * Same secondary-context-to-nucleus trick as the restore path so the
 * ASI_BLK_S block stores hit kernel mappings.
 */
170 ldub [%g6 + AOFF_task_thread + AOFF_thread_fpsaved], %g3
171 stx %fsr, [%g6 + AOFF_task_thread + AOFF_thread_xfsr]
174 stb %g3, [%g6 + AOFF_task_thread + AOFF_thread_fpsaved]
176 stb %g3, [%g6 + AOFF_task_thread + AOFF_thread_gsr]
177 mov SECONDARY_CONTEXT, %g3
178 add %g6, AOFF_task_fpregs, %g2
179 ldxa [%g3] ASI_DMMU, %g5
180 stxa %g0, [%g3] ASI_DMMU
182 membar #StoreStore | #LoadStore
/* Lower half (%f0-%f31) saved only if FPRS_DL was set. */
183 andcc %g1, FPRS_DL, %g0
186 stda %f0, [%g2] ASI_BLK_S
187 stda %f16, [%g2 + %g3] ASI_BLK_S
/* Upper half (%f32-%f63) saved only if FPRS_DU was set. */
188 andcc %g1, FPRS_DU, %g0
191 stda %f32, [%g2] ASI_BLK_S
192 stda %f48, [%g2 + %g3] ASI_BLK_S
/* Restore the caller's secondary context. */
193 5: mov SECONDARY_CONTEXT, %g1
195 stxa %g5, [%g1] ASI_DMMU
200 /* The registers for cross calls will be:
202 * DATA 0: [low 32-bits] Address of function to call, jmp to this
203 * [high 32-bits] MMU Context Argument 0, place in %g5
204 * DATA 1: Address Argument 1, place in %g6
205 * DATA 2: Address Argument 2, place in %g7
207 * With this method we can do most of the cross-call tlb/cache
208 * flushing very quickly.
210 * Current CPU's IRQ worklist table is locked into %g1,
/* Interrupt vector (cross-call / device IRQ) receive path.
 * Reads incoming UDB interrupt data; addresses >= KERNBASE are direct
 * cross-call function addresses (do_ivec_xcall), otherwise the value
 * indexes ivector_table to queue work on the per-cpu/pil worklist.
 * NOTE(review): excerpt with gaps -- several branches/delay slots are
 * not visible here.
 */
218 ldxa [%g3 + %g0] ASI_UDB_INTR_R, %g3
219 sethi %hi(KERNBASE), %g4
221 bgeu,pn %xcc, do_ivec_xcall
/* Ack the interrupt receive unit so further vectors can arrive. */
223 stxa %g0, [%g0] ASI_INTR_RECEIVE
226 sethi %hi(ivector_table), %g2
228 or %g2, %lo(ivector_table), %g2
230 ldx [%g3 + 0x08], %g2 /* irq_info */
231 ldub [%g3 + 0x04], %g4 /* pil */
/* No irq_info registered -> nobody claimed this vector. */
232 brz,pn %g2, do_ivec_spurious
/* Push this bucket onto irq_work(cpu, pil) and raise the softint. */
237 lduw [%g6 + %g4], %g5 /* g5 = irq_work(cpu, pil) */
238 stw %g5, [%g3 + 0x00] /* bucket->irq_chain = g5 */
239 stw %g3, [%g6 + %g4] /* irq_work(cpu, pil) = bucket */
240 wr %g2, 0x0, %set_softint
/* Cross-call: pull remaining UDB data words, ack, and dispatch. */
245 ldxa [%g1 + %g0] ASI_UDB_INTR_R, %g1
248 ldxa [%g7 + %g0] ASI_UDB_INTR_R, %g7
249 stxa %g0, [%g0] ASI_INTR_RECEIVE
255 stw %g3, [%g6 + 0x00] /* irq_work(cpu, 0) = bucket */
/* Disabled-vector catch path: etrap-style entry into C handler. */
258 wrpr %g5, PSTATE_IG | PSTATE_AG, %pstate
261 109: or %g7, %lo(109b), %g7
262 call catch_disabled_ivec
263 add %sp, STACK_BIAS + REGWIN_SZ, %o0
/* save_alternate_globals(%o0 = save_area):
 * Spill all three alternate global register sets (AG, IG, MG) into a
 * 0xc0-byte save area, 8 regs x 8 bytes per set, by cycling PSTATE.
 * %o5 holds the caller's original %pstate (the rdpr that loads it is
 * outside this excerpt); %o1 = %o5 with interrupts masked while we
 * are off on the alternate sets.  Caller's %pstate restored at end.
 */
267 .globl save_alternate_globals
268 save_alternate_globals: /* %o0 = save_area */
270 andn %o5, PSTATE_IE, %o1
/* Alternate globals (AG) -> save_area[0x00..0x38] */
271 wrpr %o1, PSTATE_AG, %pstate
272 stx %g0, [%o0 + 0x00]
273 stx %g1, [%o0 + 0x08]
274 stx %g2, [%o0 + 0x10]
275 stx %g3, [%o0 + 0x18]
276 stx %g4, [%o0 + 0x20]
277 stx %g5, [%o0 + 0x28]
278 stx %g6, [%o0 + 0x30]
279 stx %g7, [%o0 + 0x38]
/* Interrupt globals (IG) -> save_area[0x40..0x78] */
280 wrpr %o1, PSTATE_IG, %pstate
281 stx %g0, [%o0 + 0x40]
282 stx %g1, [%o0 + 0x48]
283 stx %g2, [%o0 + 0x50]
284 stx %g3, [%o0 + 0x58]
285 stx %g4, [%o0 + 0x60]
286 stx %g5, [%o0 + 0x68]
287 stx %g6, [%o0 + 0x70]
288 stx %g7, [%o0 + 0x78]
/* MMU globals (MG) -> save_area[0x80..0xb8] */
289 wrpr %o1, PSTATE_MG, %pstate
290 stx %g0, [%o0 + 0x80]
291 stx %g1, [%o0 + 0x88]
292 stx %g2, [%o0 + 0x90]
293 stx %g3, [%o0 + 0x98]
294 stx %g4, [%o0 + 0xa0]
295 stx %g5, [%o0 + 0xa8]
296 stx %g6, [%o0 + 0xb0]
297 stx %g7, [%o0 + 0xb8]
/* Back to the caller's original pstate. */
298 wrpr %o5, 0x0, %pstate
/* restore_alternate_globals(%o0 = save_area):
 * Exact inverse of save_alternate_globals -- reload the AG, IG and MG
 * register sets from the save area, then restore the caller's %pstate
 * (held in %o5; loaded by an rdpr outside this excerpt).
 */
302 .globl restore_alternate_globals
303 restore_alternate_globals: /* %o0 = save_area */
305 andn %o5, PSTATE_IE, %o1
/* Alternate globals (AG) <- save_area[0x00..0x38] */
306 wrpr %o1, PSTATE_AG, %pstate
307 ldx [%o0 + 0x00], %g0
308 ldx [%o0 + 0x08], %g1
309 ldx [%o0 + 0x10], %g2
310 ldx [%o0 + 0x18], %g3
311 ldx [%o0 + 0x20], %g4
312 ldx [%o0 + 0x28], %g5
313 ldx [%o0 + 0x30], %g6
314 ldx [%o0 + 0x38], %g7
/* Interrupt globals (IG) <- save_area[0x40..0x78] */
315 wrpr %o1, PSTATE_IG, %pstate
316 ldx [%o0 + 0x40], %g0
317 ldx [%o0 + 0x48], %g1
318 ldx [%o0 + 0x50], %g2
319 ldx [%o0 + 0x58], %g3
320 ldx [%o0 + 0x60], %g4
321 ldx [%o0 + 0x68], %g5
322 ldx [%o0 + 0x70], %g6
323 ldx [%o0 + 0x78], %g7
/* MMU globals (MG) <- save_area[0x80..0xb8] */
324 wrpr %o1, PSTATE_MG, %pstate
325 ldx [%o0 + 0x80], %g0
326 ldx [%o0 + 0x88], %g1
327 ldx [%o0 + 0x90], %g2
328 ldx [%o0 + 0x98], %g3
329 ldx [%o0 + 0xa0], %g4
330 ldx [%o0 + 0xa8], %g5
331 ldx [%o0 + 0xb0], %g6
332 ldx [%o0 + 0xb8], %g7
/* Back to the caller's original pstate. */
333 wrpr %o5, 0x0, %pstate
/* getcc/setcc helpers: shuttle condition codes between pt_regs->tstate
 * and regs->u_regs[UREG_G1] for userland %psr emulation.
 * NOTE(review): excerpt -- the shift/mask instructions between these
 * loads and stores are not visible here.
 */
339 ldx [%o0 + PT_V9_TSTATE], %o1
343 stx %o1, [%o0 + PT_V9_G1]
345 ldx [%o0 + PT_V9_TSTATE], %o1
346 ldx [%o0 + PT_V9_G1], %o2
347 or %g0, %ulo(TSTATE_ICC), %o3
354 stx %o1, [%o0 + PT_V9_TSTATE]
/* User trap dispatch: empty handler address -> normal etrap path.
 * Otherwise rewrite CWP in the saved tstate before entering the
 * user trap handler.
 */
356 .globl utrap, utrap_ill
357 utrap: brz,pn %g1, etrap
362 andn %l6, TSTATE_CWP, %l6
363 wrpr %l6, %l7, %tstate
370 add %sp, STACK_BIAS + REGWIN_SZ, %o0
#ifdef CONFIG_BLK_DEV_FD
/* Floppy pseudo-DMA hard interrupt: byte-bangs data between the FDC
 * data port (physical address in fdc_status, accessed via
 * ASI_PHYS_BYPASS_EC_E) and the pdma_vaddr buffer, pdma_size bytes.
 * Falls through to floppy_dosoftint when no pseudo-DMA is in flight.
 * NOTE(review): excerpt -- status-bit tests and some delay slots are
 * missing between the visible lines.
 */
375 .globl floppy_hardint
377 wr %g0, (1 << 11), %clear_softint
378 sethi %hi(doing_pdma), %g1
379 ld [%g1 + %lo(doing_pdma)], %g2
380 brz,pn %g2, floppy_dosoftint
381 sethi %hi(fdc_status), %g3
382 ldx [%g3 + %lo(fdc_status)], %g3
383 sethi %hi(pdma_vaddr), %g5
384 ldx [%g5 + %lo(pdma_vaddr)], %g4
385 sethi %hi(pdma_size), %g5
386 ldx [%g5 + %lo(pdma_size)], %g5
/* Classify FDC status: fifo empty / overrun / write direction. */
389 lduba [%g3] ASI_PHYS_BYPASS_EC_E, %g7
391 be,pn %icc, floppy_fifo_emptied
393 be,pn %icc, floppy_overrun
395 be,pn %icc, floppy_write
/* Read loop: pull bytes from the FDC until pdma_size exhausted. */
399 lduba [%g3] ASI_PHYS_BYPASS_EC_E, %g7
403 bne,pn %xcc, next_byte
406 b,pt %xcc, floppy_tdone
/* Write loop: push bytes to the FDC. */
413 stba %g7, [%g3] ASI_PHYS_BYPASS_EC_E
415 bne,pn %xcc, next_byte
/* Transfer done: write back cursor/size, strobe auxio (TC). */
419 sethi %hi(pdma_vaddr), %g1
420 stx %g4, [%g1 + %lo(pdma_vaddr)]
421 sethi %hi(pdma_size), %g1
422 stx %g5, [%g1 + %lo(pdma_size)]
423 sethi %hi(auxio_register), %g1
424 ldx [%g1 + %lo(auxio_register)], %g7
/* Deliberate delay (nop slide) for slow auxio hardware. */
430 nop; nop; nop; nop; nop; nop;
431 nop; nop; nop; nop; nop; nop;
434 sethi %hi(doing_pdma), %g1
435 b,pt %xcc, floppy_dosoftint
436 st %g0, [%g1 + %lo(doing_pdma)]
/* FIFO emptied mid-transfer: save cursor, ack the interrupt by
 * writing ICLR_IDLE to the bucket's iclr register.
 */
439 sethi %hi(pdma_vaddr), %g1
440 stx %g4, [%g1 + %lo(pdma_vaddr)]
441 sethi %hi(pdma_size), %g1
442 stx %g5, [%g1 + %lo(pdma_size)]
443 sethi %hi(irq_action), %g1
444 or %g1, %lo(irq_action), %g1
445 ldx [%g1 + (11 << 3)], %g3 ! irqaction[floppy_irq]
446 ldx [%g3 + 0x10], %g4 ! action->mask == ino_bucket ptr
447 ldx [%g4 + 0x10], %g4 ! bucket->iclr
448 stwa %g0, [%g4] ASI_PHYS_BYPASS_EC_E ! ICLR_IDLE
449 membar #Sync ! probably not needed...
/* Overrun: abandon pseudo-DMA, record state, punt to C handler. */
453 sethi %hi(pdma_vaddr), %g1
454 stx %g4, [%g1 + %lo(pdma_vaddr)]
455 sethi %hi(pdma_size), %g1
456 stx %g5, [%g1 + %lo(pdma_size)]
457 sethi %hi(doing_pdma), %g1
458 st %g0, [%g1 + %lo(doing_pdma)]
465 109: or %g7, %lo(109b), %g7
469 call sparc_floppy_irq
470 add %sp, STACK_BIAS + REGWIN_SZ, %o2
#endif /* CONFIG_BLK_DEV_FD */
477 /* XXX Here is stuff we still need to write... -DaveM XXX */
478 .globl netbsd_syscall
483 /* These next few routines must be sure to clear the
484 * SFSR FaultValid bit so that the fast tlb data protection
485 * handler does not flush the wrong context and lock up the
/* Data access exception handlers (TL1 and TL0 entry points).
 * Read DMMU SFSR/SFAR, then clear SFSR.FaultValid so the fast TLB
 * data-protection handler does not act on stale fault state (see the
 * comment block above).  TL1 path checks for window-fill faults and
 * diverts to winfix_dax.  NOTE(review): excerpt -- the rdpr/mov setup
 * lines for %g3/%g5 are not visible here.
 */
488 .globl __do_data_access_exception
489 .globl __do_data_access_exception_tl1
490 __do_data_access_exception_tl1:
/* Switch onto MMU/alternate globals for the fault register reads. */
492 wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate
497 ldxa [%g3] ASI_DMMU, %g4 ! Get SFSR
498 ldxa [%g5] ASI_DMMU, %g5 ! Get SFAR
499 stxa %g0, [%g3] ASI_DMMU ! Clear SFSR.FaultValid bit
501 bgu,pn %icc, winfix_dax
505 or %g7, %lo(109f), %g7 ! Merge in below
506 __do_data_access_exception:
508 wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate
511 ldxa [%g3] ASI_DMMU, %g4 ! Get SFSR
512 ldxa [%g5] ASI_DMMU, %g5 ! Get SFAR
513 stxa %g0, [%g3] ASI_DMMU ! Clear SFSR.FaultValid bit
/* etrap into the C handler with pt_regs pointer in %o0. */
517 109: or %g7, %lo(109b), %g7
520 call data_access_exception
521 add %sp, STACK_BIAS + REGWIN_SZ, %o0
/* Instruction access exception handlers (TL1 and TL0 entry points).
 * Read SFSR/SFAR, clear SFSR.FaultValid, then etrap into the C handler.
 * NOTE(review): excerpt -- the rdpr/mov setup lines for %g3/%g5 are not
 * visible here.
 * FIX: for an *instruction* access fault the SFSR lives in the IMMU,
 * and the FaultValid clear below already targets ASI_IMMU through the
 * same %g3 register offset -- so the SFSR reads must use ASI_IMMU,
 * not ASI_DMMU as the original had.  The SFAR read correctly stays on
 * ASI_DMMU (the IMMU has no fault address register).
 */
525 .globl __do_instruction_access_exception
526 .globl __do_instruction_access_exception_tl1
527 __do_instruction_access_exception_tl1:
/* Switch onto MMU/alternate globals for the fault register reads. */
529 wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate
532 ldxa [%g3] ASI_IMMU, %g4 ! Get SFSR
533 ldxa [%g5] ASI_DMMU, %g5 ! Get SFAR
534 stxa %g0, [%g3] ASI_IMMU ! Clear FaultValid bit
538 or %g7, %lo(109f), %g7 ! Merge in below
539 __do_instruction_access_exception:
541 wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate
544 ldxa [%g3] ASI_IMMU, %g4 ! Get SFSR
545 ldxa [%g5] ASI_DMMU, %g5 ! Get SFAR
546 stxa %g0, [%g3] ASI_IMMU ! Clear FaultValid bit
/* etrap into the C handler with pt_regs pointer in %o0. */
550 109: or %g7, %lo(109b), %g7
553 call instruction_access_exception
554 add %sp, STACK_BIAS + REGWIN_SZ, %o0
558 /* This is the trap handler entry point for ECC correctable
559 * errors. They are corrected, but we listen for the trap
560 * so that the event can be logged.
562 * Disrupting errors are either:
563 * 1) single-bit ECC errors during UDB reads to system
565 * 2) data parity errors during write-back events
567 * As far as I can make out from the manual, the CEE trap
568 * is only for correctable errors during memory read
569 * accesses by the front-end of the processor.
571 * The code below is only for trap level 1 CEE events,
572 * as it is the only situation where we can safely record
573 * and log. For trap level >1 we just clear the CE bit
574 * in the AFSR and return.
577 /* Our trap handling infrastructure allows us to preserve
578 * two 64-bit values during etrap for arguments to
579 * subsequent C code. Therefore we encode the information
582 * value 1) Full 64-bits of AFAR
583 * value 2) Low 33-bits of AFSR, then bits 33-->42
584 * are UDBL error status and bits 43-->52
585 * are UDBH error status
/* Correctable ECC error (CEE) trap, TL1 path -- see the long comment
 * block above for the AFSR/UDBL/UDBH encoding packed into %g1.
 * NOTE(review): excerpt -- a few delay-slot/branch lines are missing.
 */
590 ldxa [%g0] ASI_AFSR, %g1 ! Read AFSR
591 ldxa [%g0] ASI_AFAR, %g2 ! Read AFAR
/* Keep only the architecturally-defined low 33 bits of AFSR. */
592 sllx %g1, 31, %g1 ! Clear reserved bits
593 srlx %g1, 31, %g1 ! in AFSR
595 /* NOTE: UltraSparc-I/II have high and low UDB error
596 * registers, corresponding to the two UDB units
597 * present on those chips. UltraSparc-IIi only
598 * has a single UDB, called "SDB" in the manual.
599 * For IIi the upper UDB register always reads
600 * as zero so for our purposes things will just
601 * work with the checks below.
/* UDB-Low status: mask to 10 bits, pack at bits 33..42 of %g1. */
603 ldxa [%g0] ASI_UDBL_ERROR_R, %g3 ! Read UDB-Low error status
604 andcc %g3, (1 << 8), %g4 ! Check CE bit
605 sllx %g3, (64 - 10), %g3 ! Clear reserved bits
606 srlx %g3, (64 - 10), %g3 ! in UDB-Low error status
608 sllx %g3, (33 + 0), %g3 ! Shift up to encoding area
609 or %g1, %g3, %g1 ! Or it in
610 be,pn %xcc, 1f ! Branch if CE bit was clear
612 stxa %g4, [%g0] ASI_UDB_ERROR_W ! Clear CE sticky bit in UDBL
613 membar #Sync ! Synchronize ASI stores
/* UDB-High status: same dance, packed at bits 43..52 of %g1. */
614 1: mov 0x18, %g5 ! Addr of UDB-High error status
615 ldxa [%g5] ASI_UDBH_ERROR_R, %g3 ! Read it
617 andcc %g3, (1 << 8), %g4 ! Check CE bit
618 sllx %g3, (64 - 10), %g3 ! Clear reserved bits
619 srlx %g3, (64 - 10), %g3 ! in UDB-High error status
620 sllx %g3, (33 + 10), %g3 ! Shift up to encoding area
621 or %g1, %g3, %g1 ! Or it in
622 be,pn %xcc, 1f ! Branch if CE bit was clear
626 stxa %g4, [%g5] ASI_UDB_ERROR_W ! Clear CE sticky bit in UDBH
627 membar #Sync ! Synchronize ASI stores
/* Acknowledge the AFSR CE sticky bit (bit 20) and tidy AFAR. */
628 1: mov 1, %g5 ! AFSR CE bit is
629 sllx %g5, 20, %g5 ! bit 20
630 stxa %g5, [%g0] ASI_AFSR ! Clear CE sticky bit in AFSR
631 membar #Sync ! Synchronize ASI stores
632 sllx %g2, (64 - 41), %g2 ! Clear reserved bits
633 srlx %g2, (64 - 41), %g2 ! in latched AFAR
635 andn %g2, 0x0f, %g2 ! Finish resv bit clearing
/* Hand the encoded AFSR (%g4) and AFAR (%g5) to the C logger via
 * the two etrap-preserved argument slots, then return through rtrap.
 */
636 mov %g1, %g4 ! Move AFSR+UDB* into save reg
637 mov %g2, %g5 ! Move AFAR into save reg
640 ba,pt %xcc, etrap_irq
646 add %sp, STACK_BIAS + REGWIN_SZ, %o2
647 ba,a,pt %xcc, rtrap_clr_l6
/* Unaligned-access and related MMU fault fragments.
 * NOTE(review): heavily excerpted -- labels identifying each handler
 * (presumably mem_address_unaligned and the lddfmna/stdfmna paths)
 * fall in lines missing from this view; comments below are hedged.
 */
652 stxa %g0, [%g3] ASI_DMMU ! Clear FaultValid bit
656 109: or %g7, %lo(109b), %g7
658 add %sp, STACK_BIAS + REGWIN_SZ, %o0
667 /* Setup %g4/%g5 now as they are used in the
/* Read SFAR into %g4 and SFSR into %g5, clear FaultValid, and divert
 * window-fill faults to winfix_dax before calling the C handler.
 */
672 ldxa [%g4] ASI_DMMU, %g4
673 ldxa [%g3] ASI_DMMU, %g5
674 stxa %g0, [%g3] ASI_DMMU ! Clear FaultValid bit
676 bgu,pn %icc, winfix_dax
679 1: sethi %hi(109f), %g7
681 109: or %g7, %lo(109b), %g7
684 call mem_address_unaligned
685 add %sp, STACK_BIAS + REGWIN_SZ, %o0
/* Two parallel fault fragments: same SFSR read/clear pattern followed
 * by an etrap-style call (callee labels not visible in this excerpt).
 */
693 ldxa [%g4] ASI_DMMU, %g5
694 stxa %g0, [%g4] ASI_DMMU ! Clear FaultValid bit
697 ldxa [%g4] ASI_DMMU, %g4
699 109: or %g7, %lo(109b), %g7
703 add %sp, STACK_BIAS + REGWIN_SZ, %o0
711 ldxa [%g4] ASI_DMMU, %g5
712 stxa %g0, [%g4] ASI_DMMU ! Clear FaultValid bit
715 ldxa [%g4] ASI_DMMU, %g4
717 109: or %g7, %lo(109b), %g7
721 add %sp, STACK_BIAS + REGWIN_SZ, %o0
/* Breakpoint trap: hand off to the C handler with pt_regs in %o0. */
725 .globl breakpoint_trap
727 call sparc_breakpoint
728 add %sp, STACK_BIAS + REGWIN_SZ, %o0
#if defined(CONFIG_SUNOS_EMUL) || defined(CONFIG_SOLARIS_EMUL) || \
defined(CONFIG_SOLARIS_EMUL_MODULE)
/* SunOS uses syscall zero as the 'indirect syscall' it looks
 * like indir_syscall(scall_num, arg0, arg1, arg2...); etc.
 * This is complete brain damage.
/* Indirect syscall: out-of-range numbers fall back to sunos_nosys,
 * otherwise the real handler is fetched from sunos_sys_table.
 * NOTE(review): excerpt -- the bounds check and arg-shuffle lines are
 * missing from this view.
 */
745 sethi %hi(sunos_nosys), %l6
747 or %l6, %lo(sunos_nosys), %l6
748 1: sethi %hi(sunos_sys_table), %l7
749 or %l7, %lo(sunos_sys_table), %l7
750 lduw [%l7 + %o0], %l6
/* SunOS getpid-style calls return two values: the second one is
 * stuffed into the saved %i1 slot before the normal return path.
 */
764 stx %o0, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I1]
765 b,pt %xcc, ret_sys_call
766 stx %o0, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I0]
768 /* SunOS getuid() returns uid in %o0 and euid in %o1 */
774 stx %o0, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I1]
775 b,pt %xcc, ret_sys_call
776 stx %o0, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I0]
778 /* SunOS getgid() returns gid in %o0 and egid in %o1 */
784 stx %o0, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I1]
785 b,pt %xcc, ret_sys_call
786 stx %o0, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I0]
789 /* SunOS's execv() call only specifies the argv argument, the
790 * environment settings are the same as the calling processes.
/* execve family: all three entry points funnel into execve_merge
 * with the chosen sparc_execve/sparc32_execve handler in %g1.
 */
792 .globl sunos_execv, sys_execve, sys32_execve
794 sethi %hi(sparc_execve), %g1
795 ba,pt %xcc, execve_merge
796 or %g1, %lo(sparc_execve), %g1
798 stx %g0, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I2]
800 sethi %hi(sparc32_execve), %g1
801 or %g1, %lo(sparc32_execve), %g1
805 add %sp, STACK_BIAS + REGWIN_SZ, %o0
/* Thin syscall stubs: each tail-branches into its C implementation,
 * passing the pt_regs pointer (or a user sigaltstack pointer) in the
 * delay slot.  NOTE(review): excerpt -- some stub bodies and delay
 * slots are missing between the visible lines.
 */
807 .globl sys_pipe, sys_sigpause, sys_nis_syscall
808 .globl sys_sigsuspend, sys_rt_sigsuspend, sys32_rt_sigsuspend
809 .globl sys_rt_sigreturn
810 .globl sys32_sigreturn, sys32_rt_sigreturn
811 .globl sys32_execve, sys_ptrace
812 .globl sys_sigaltstack, sys32_sigaltstack
813 .globl sys32_sigstack
815 sys_pipe: ba,pt %xcc, sparc_pipe
816 add %sp, STACK_BIAS + REGWIN_SZ, %o0
817 sys_nis_syscall:ba,pt %xcc, c_sys_nis_syscall
818 add %sp, STACK_BIAS + REGWIN_SZ, %o0
820 ba,pt %xcc, sparc_memory_ordering
821 add %sp, STACK_BIAS + REGWIN_SZ, %o1
822 sys_sigaltstack:ba,pt %xcc, do_sigaltstack
823 add %i6, STACK_BIAS, %o2
824 sys32_sigstack: ba,pt %xcc, do_sys32_sigstack
827 ba,pt %xcc, do_sys32_sigaltstack
/* Suspend/sigreturn family: pt_regs pointer set up before the call. */
831 sys_sigsuspend: add %sp, STACK_BIAS + REGWIN_SZ, %o0
835 sys_rt_sigsuspend: /* NOTE: %o0,%o1 have a correct value already */
836 add %sp, STACK_BIAS + REGWIN_SZ, %o2
837 call do_rt_sigsuspend
840 sys32_rt_sigsuspend: /* NOTE: %o0,%o1 have a correct value already */
842 add %sp, STACK_BIAS + REGWIN_SZ, %o2
843 call do_rt_sigsuspend32
845 /* NOTE: %o0 has a correct value already */
846 sys_sigpause: add %sp, STACK_BIAS + REGWIN_SZ, %o1
851 add %sp, STACK_BIAS + REGWIN_SZ, %o0
856 add %sp, STACK_BIAS + REGWIN_SZ, %o0
861 add %sp, STACK_BIAS + REGWIN_SZ, %o0
862 call do_rt_sigreturn32
/* ptrace: pt_regs in %o0; flag check against task->ptrace below. */
865 sys_ptrace: add %sp, STACK_BIAS + REGWIN_SZ, %o0
870 1: ldx [%curptr + AOFF_task_ptrace], %l5
880 /* This is how fork() was meant to be done, 8 instruction entry.
882 * I questioned the following code briefly, let me clear things
883 * up so you must not reason on it like I did.
885 * Know the fork_kpsr etc. we use in the sparc32 port? We don't
886 * need it here because the only piece of window state we copy to
887 * the child is the CWP register. Even if the parent sleeps,
888 * we are safe because we stuck it into pt_regs of the parent
889 * so it will not change.
891 * XXX This raises the question, whether we can do the same on
892 * XXX sparc32 to get rid of fork_kpsr _and_ fork_kwim. The
893 * XXX answer is yes. We stick fork_kpsr in UREG_G0 and
894 * XXX fork_kwim in UREG_G1 (global registers are considered
895 * XXX volatile across a system call in the sparc ABI I think
896 * XXX if it isn't we can use regs->y instead, anyone who depends
897 * XXX upon the Y register being preserved across a fork deserves
900 * In fact we should take advantage of that fact for other things
901 * during system calls...
/* fork/vfork/clone entry points -- see the design comment block above.
 * vfork = clone(CLONE_VFORK | CLONE_VM | SIGCHLD); fork and clone
 * share the do_fork tail (lines between are missing in this excerpt).
 */
903 .globl sys_fork, sys_vfork, sys_clone, sparc_exit
904 .globl ret_from_syscall
906 sys_vfork: /* Under Linux, vfork and fork are just special cases of clone. */
907 sethi %hi(0x4000 | 0x0100 | SIGCHLD), %o0
908 or %o0, %lo(0x4000 | 0x0100 | SIGCHLD), %o0
909 ba,pt %xcc, sys_clone
916 add %sp, STACK_BIAS + REGWIN_SZ, %o2
918 /* Clear SPARC_FLAG_NEWCHILD, switch_to leaves thread.flags in
919 * %o7 for us. Check performance counter stuff too.
921 andn %o7, SPARC_FLAG_NEWCHILD, %l0
922 mov %g5, %o0 /* 'prev' */
924 stb %l0, [%g6 + AOFF_task_thread + AOFF_thread_flags]
/* Re-arm per-thread performance counters if the task uses them. */
925 andcc %l0, SPARC_FLAG_PERFCTR, %g0
928 ldx [%g6 + AOFF_task_thread + AOFF_thread_pcr_reg], %o7
931 /* Blackbird errata workaround. See commentary in
932 * smp.c:smp_percpu_timer_interrupt() for more
938 99: wr %g0, %g0, %pic
941 1: b,pt %xcc, ret_sys_call
942 ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I0], %o0
/* Process exit: drain other-window state so no user windows leak,
 * then mark zero windows saved in the thread struct.
 */
943 sparc_exit: rdpr %otherwin, %g1
944 wrpr %g0, (PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV), %pstate
947 wrpr %g3, 0x0, %cansave
948 wrpr %g0, 0x0, %otherwin
949 wrpr %g0, (PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV | PSTATE_IE), %pstate
951 stb %g0, [%g6 + AOFF_task_thread + AOFF_thread_w_saved]
/* Out-of-range syscall numbers land here -> sys_ni_syscall. */
953 linux_sparc_ni_syscall:
954 sethi %hi(sys_ni_syscall), %l7
956 or %l7, %lo(sys_ni_syscall), %l7
958 linux_syscall_trace32:
979 /* Linux 32-bit and SunOS system calls enter here... */
/* 32-bit entry: zero-extend each 32-bit user argument with srl before
 * dispatching through the syscall table slot (%g1 * 4).  A set
 * task->ptrace trace flag (0x02) diverts to the trace path first.
 */
981 .globl linux_sparc_syscall32
982 linux_sparc_syscall32:
983 /* Direct access to user regs, must faster. */
984 cmp %g1, NR_SYSCALLS ! IEU1 Group
985 bgeu,pn %xcc, linux_sparc_ni_syscall ! CTI
986 srl %i0, 0, %o0 ! IEU0
987 sll %g1, 2, %l4 ! IEU0 Group
988 #ifdef SYSCALL_TRACING
989 add %sp, STACK_BIAS + REGWIN_SZ, %o1
990 call syscall_trace_entry
995 lduw [%l7 + %l4], %l7 ! Load
996 srl %i1, 0, %o1 ! IEU0 Group
997 ldx [%curptr + AOFF_task_ptrace], %l0 ! Load
1000 srl %i2, 0, %o2 ! IEU0 Group
1001 andcc %l0, 0x02, %g0 ! IEU0 Group
1002 bne,pn %icc, linux_syscall_trace32 ! CTI
1004 call %l7 ! CTI Group brk forced
1005 srl %i3, 0, %o3 ! IEU0
1008 /* Linux native and SunOS system calls enter here... */
/* 64-bit native entry: same shape as the 32-bit path but arguments are
 * passed through unmodified.  ret_sys_call (label in a missing line)
 * translates the C return value into user-visible CCR/%o0 state:
 * success clears carry, error sets carry and stores -errno.
 * NOTE(review): excerpt -- delay slots and the error-path compare
 * lines are partially missing.
 */
1010 .globl linux_sparc_syscall, ret_sys_call
1011 linux_sparc_syscall:
1012 /* Direct access to user regs, must faster. */
1013 cmp %g1, NR_SYSCALLS ! IEU1 Group
1014 bgeu,pn %xcc, linux_sparc_ni_syscall ! CTI
1016 sll %g1, 2, %l4 ! IEU0 Group
1017 #ifdef SYSCALL_TRACING
1018 add %sp, STACK_BIAS + REGWIN_SZ, %o1
1019 call syscall_trace_entry
1024 lduw [%l7 + %l4], %l7 ! Load
1025 4: mov %i2, %o2 ! IEU0 Group
1026 ldx [%curptr + AOFF_task_ptrace], %l0 ! Load
1029 mov %i4, %o4 ! IEU0 Group
1030 andcc %l0, 0x02, %g0 ! IEU1 Group+1 bubble
1031 bne,pn %icc, linux_syscall_trace ! CTI Group
1033 2: call %l7 ! CTI Group brk forced
/* Return path: stash %o0 into saved %i0, fetch tstate/tnpc. */
1037 3: stx %o0, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I0]
1039 #ifdef SYSCALL_TRACING
1040 call syscall_trace_exit
1041 add %sp, STACK_BIAS + REGWIN_SZ, %o1
1043 ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_TSTATE], %g3
1044 ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_TNPC], %l1 ! pc = npc
1046 mov %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2
/* -ENOIOCTLCMD is treated specially (boundary of the errno range). */
1047 cmp %o0, -ENOIOCTLCMD
/* Success: clear both carry bits, advance pc/npc past the trap. */
1051 andcc %l0, 0x02, %l6
1052 andn %g3, %g2, %g3 /* System call success, clear Carry condition code. */
1053 stx %g3, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_TSTATE]
1054 bne,pn %icc, linux_syscall_trace2
1055 add %l1, 0x4, %l2 ! npc = npc+4
1056 stx %l1, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_TPC]
1057 ba,pt %xcc, rtrap_clr_l6
1058 stx %l2, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_TNPC]
1061 /* System call failure, set Carry condition code.
1062 * Also, get abs(errno) to return to the process.
1066 stx %o0, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I0]
1068 stx %g3, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_TSTATE]
1069 bne,pn %icc, linux_syscall_trace2
1070 add %l1, 0x4, %l2 !npc = npc+4
1071 stx %l1, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_TPC]
1074 stx %l2, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_TNPC]
/* Traced syscalls: record pc/npc then exit through the tracer. */
1075 linux_syscall_trace2:
1078 stx %l1, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_TPC]
1080 stx %l2, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_TNPC]
/* __flushw_user: flush user register windows to the stack by doing
 * dummy save/restore pairs (loop control lines missing in excerpt).
 */
1083 .globl __flushw_user
1088 1: save %sp, -128, %sp
1094 restore %g0, %g0, %g0
1098 /* This need not obtain the xtime_lock as it is coded in
1099 * an implicitly SMP safe way already.
1102 .globl do_gettimeofday
1103 do_gettimeofday: /* %o0 = timevalp */
1104 /* Load doubles must be used on xtime so that what we get
1105 * is guarenteed to be atomic, this is why we can run this
1106 * with interrupts on full blast. Don't touch this... -DaveM
1108 * Note with time_t changes to the timeval type, I must now use
1109 * nucleus atomic quad 128-bit loads.
/* Read xtime twice with atomic quad loads; the retry branch between
 * the two reads (missing from this excerpt) presumably loops until
 * both reads agree -- the lock-free seqlock-style pattern.
 */
1111 sethi %hi(timer_tick_offset), %g3
1112 sethi %hi(xtime), %g2
1113 sethi %hi(timer_tick_compare), %g1
1114 ldx [%g3 + %lo(timer_tick_offset)], %g3
1115 or %g2, %lo(xtime), %g2
1116 or %g1, %lo(timer_tick_compare), %g1
1117 1: ldda [%g2] ASI_NUCLEUS_QUAD_LDD, %o4
1120 ldda [%g2] ASI_NUCLEUS_QUAD_LDD, %o2
/* Account for ticks not yet folded into xtime (wall_jiffies lag). */
1125 sethi %hi(wall_jiffies), %o2
1126 sethi %hi(jiffies), %o3
1127 ldx [%o2 + %lo(wall_jiffies)], %o2
1128 ldx [%o3 + %lo(jiffies)], %o3
/* Convert timer ticks to microseconds via the precomputed quotient. */
1130 sethi %hi(timer_ticks_per_usec_quotient), %o3
1132 ldx [%o3 + %lo(timer_ticks_per_usec_quotient)], %o3
1137 sethi %hi(10000), %g2
1138 or %g2, %lo(10000), %g2
/* Normalize tv_usec into [0, 1000000) carrying into tv_sec. */
1140 1: sethi %hi(1000000), %o2
1142 or %o2, %lo(1000000), %o2
/* Store tv_sec (and, in missing lines, tv_usec) into *timevalp. */
1146 stx %o4, [%o0 + 0x0]
1149 stx %o4, [%o0 + 0x0]