2 * This file contains miscellaneous low-level functions.
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
5 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
15 #include <linux/config.h>
16 #include <linux/sys.h>
17 #include <asm/unistd.h>
18 #include <asm/errno.h>
19 #include <asm/processor.h>
20 #include "ppc_asm.tmpl"
24 /* This instruction is not implemented on the PPC 601 or 603 */
36 * Returns (address we're running at) - (address we were linked at)
37 * for use before the text and data are mapped to KERNELBASE.
52 * rc = _disable_interrupts()
/*
 * rc = _disable_interrupts()
 * Clears MSR_EE and returns the previous state of the EE bit
 * (0 or 1) in r3.
 * NOTE(review): the trailing blr is not visible in this chunk.
 */
54 _GLOBAL(_disable_interrupts)
57 mfmsr r0 /* Get current interrupt state */
58 rlwinm r3,r0,16+1,32-1,31 /* Extract old value of 'EE' */
59 li r4,0 /* Need [unsigned] value of MSR_EE */
60 ori r4,r4,MSR_EE /* Build MSR_EE mask in r4 */
61 andc r0,r0,r4 /* Clear the EE bit (mask is in r4) */
62 sync /* Some chip revs have problems here... */
63 mtmsr r0 /* Update machine state */
68 * _enable_interrupts(int state)
69 * turns on interrupts if state = 1.
/*
 * _enable_interrupts(int state)
 * Turns interrupts on (sets MSR_EE) when state != 0.  If any lost
 * interrupts are pending they are replayed first via
 * do_lost_interrupts, with the new MSR value in r3.
 */
71 _GLOBAL(_enable_interrupts)
72 cmpi 0,r3,0 /* turning them on? */
73 beqlr /* nothing to do if state == 0 */
76 lis r4,ppc_n_lost_interrupts@ha
77 lwz r4,ppc_n_lost_interrupts@l(r4) /* r4 = ppc_n_lost_interrupts */
78 mfmsr r3 /* Get current state */
79 ori r3,r3,MSR_EE /* Turn on 'EE' bit */
80 cmpi 0,r4,0 /* lost interrupts to process first? */
81 bne- do_lost_interrupts /* replay them, then apply MSR in r3 */
82 sync /* Some chip revs have problems here... */
83 mtmsr r3 /* Update machine state */
87 * We were about to enable interrupts but we have to simulate
88 * some interrupts that were lost by enable_irq first.
/*
 * do_lost_interrupts -- simulate interrupts that were lost while
 * interrupts were disabled (see comment above); the MSR value to
 * apply afterwards is passed in r3 by the caller.
 * NOTE(review): only the entry of this routine is visible in this
 * chunk -- the loop body and exit are in the missing lines.
 */
90 .globl do_lost_interrupts
97 lis r4,ppc_n_lost_interrupts@ha
98 lwz r4,ppc_n_lost_interrupts@l(r4) /* r4 = ppc_n_lost_interrupts */
111 * complement mask on the msr then "or" some values on.
112 * _nmask_and_or_msr(nmask, value_to_or)
/*
 * _nmask_and_or_msr(nmask, value_to_or)
 * msr = (msr & ~nmask) | value_to_or
 * NOTE(review): the trailing blr is not visible in this chunk.
 */
114 _GLOBAL(_nmask_and_or_msr)
115 mfmsr r0 /* Get current msr */
116 andc r0,r0,r3 /* And off the bits set in r3 (first parm) */
117 or r0,r0,r4 /* Or on the bits in r4 (second parm) */
118 sync /* Some chip revs have problems here... */
119 mtmsr r0 /* Update machine state */
137 * Flush MMU TLB for a particular address
149 * Atomic [test&set] exchange
151 * unsigned long xchg_u32(void *ptr, unsigned long val)
152 * Changes the memory location '*ptr' to be val and returns
153 * the previous value stored there.
/*
 * unsigned long xchg_u32(void *ptr, unsigned long val)
 * Atomically store val at *ptr (lwarx/stwcx. reservation loop) and
 * return the previous contents in r3.
 * NOTE(review): the _GLOBAL(xchg_u32) label and the final blr are
 * outside the visible lines of this chunk.
 */
156 mr r5,r3 /* Save pointer */
157 10: lwarx r3,0,r5 /* Fetch old value & reserve */
158 stwcx. r4,0,r5 /* Update with new value */
159 bne- 10b /* Retry if "reservation" (i.e. lock) lost */
163 * Try to acquire a spinlock.
164 * Only does the stwcx. if the load returned 0 - the Programming
165 * Environments Manual suggests not doing unnecessary stcwx.'s
166 * since they may inhibit forward progress by other CPUs in getting
/*
 * __spin_trylock -- single attempt to take a spinlock word.
 * The stwcx. is only issued when the lock word read back as 0 (see
 * the comment above about avoiding unnecessary stwcx.'s).  A held
 * lock returns non-zero in r3.
 * NOTE(review): the lines that place the lock pointer in r4 and the
 * store value in r5, and the success-path return, are not visible
 * in this chunk -- confirm against the full file.
 */
169 _GLOBAL(__spin_trylock)
171 eieio /* prevent reordering of stores */
173 lwarx r3,0,r4 /* fetch old value, establish reservation */
174 cmpwi 0,r3,0 /* is it 0? */
175 bnelr- /* return failure if not */
176 stwcx. r5,0,r4 /* try to update with new value */
177 bne- 1f /* if we failed */
178 eieio /* prevent reordering of stores */
180 1: li r3,1 /* return non-zero for failure */
184 * Atomic add/sub/inc/dec operations
186 * void atomic_add(int c, int *v)
187 * void atomic_sub(int c, int *v)
188 * void atomic_inc(int *v)
189 * void atomic_dec(int *v)
190 * int atomic_dec_and_test(int *v)
191 * int atomic_inc_return(int *v)
192 * int atomic_dec_return(int *v)
193 * void atomic_clear_mask(atomic_t mask, atomic_t *addr)
194 * void atomic_set_mask(atomic_t mask, atomic_t *addr);
/*
 * void atomic_add(int c, int *v) -- *v += c, atomically.
 * NOTE(review): the _GLOBAL(atomic_add) label and the trailing blr
 * are not visible in this chunk.
 */
197 10: lwarx r5,0,r4 /* Fetch old value & reserve */
198 add r5,r5,r3 /* Perform 'add' operation */
199 stwcx. r5,0,r4 /* Update with new value */
200 bne- 10b /* Retry if "reservation" (i.e. lock) lost */
/*
 * int atomic_add_return(int c, int *v) -- *v += c atomically and
 * return the new value.
 * NOTE(review): the 'mr r3,r5' / blr return sequence is not
 * visible in this chunk.
 */
202 _GLOBAL(atomic_add_return)
203 10: lwarx r5,0,r4 /* Fetch old value & reserve */
204 add r5,r5,r3 /* Perform 'add' operation */
205 stwcx. r5,0,r4 /* Update with new value */
206 bne- 10b /* Retry if "reservation" (i.e. lock) lost */
/*
 * void atomic_sub(int c, int *v) -- *v -= c, atomically.
 * NOTE(review): the _GLOBAL(atomic_sub) label and the trailing blr
 * are not visible in this chunk.
 */
210 10: lwarx r5,0,r4 /* Fetch old value & reserve */
211 sub r5,r5,r3 /* Perform 'sub' operation (comment said 'add') */
212 stwcx. r5,0,r4 /* Update with new value */
213 bne- 10b /* Retry if "reservation" (i.e. lock) lost */
/*
 * void atomic_inc(int *v) -- ++*v, atomically.
 * NOTE(review): the _GLOBAL(atomic_inc) label and the trailing blr
 * are not visible in this chunk.
 */
216 10: lwarx r5,0,r3 /* Fetch old value & reserve */
217 addi r5,r5,1 /* Increment by one */
218 stwcx. r5,0,r3 /* Update with new value */
219 bne- 10b /* Retry if "reservation" (i.e. lock) lost */
/*
 * int atomic_inc_return(int *v) -- ++*v atomically and return the
 * new value in r3.
 */
221 _GLOBAL(atomic_inc_return)
222 10: lwarx r5,0,r3 /* Fetch old value & reserve */
223 addi r5,r5,1 /* Increment by one */
224 stwcx. r5,0,r3 /* Update with new value */
225 bne- 10b /* Retry if "reservation" (i.e. lock) lost */
226 mr r3,r5 /* Return new value */
/*
 * void atomic_dec(int *v) -- --*v, atomically.
 * NOTE(review): the _GLOBAL(atomic_dec) label and the trailing blr
 * are not visible in this chunk.
 */
229 10: lwarx r5,0,r3 /* Fetch old value & reserve */
230 subi r5,r5,1 /* Decrement by one (comment said 'add') */
231 stwcx. r5,0,r3 /* Update with new value */
232 bne- 10b /* Retry if "reservation" (i.e. lock) lost */
/*
 * int atomic_dec_return(int *v) -- --*v atomically and return the
 * new value in r3.
 */
234 _GLOBAL(atomic_dec_return)
235 10: lwarx r5,0,r3 /* Fetch old value & reserve */
236 subi r5,r5,1 /* Decrement by one (comment said 'add') */
237 stwcx. r5,0,r3 /* Update with new value */
238 bne- 10b /* Retry if "reservation" (i.e. lock) lost */
239 mr r3,r5 /* Return new value */
/*
 * int atomic_dec_and_test(int *v) -- --*v atomically; true iff the
 * result is zero.
 * NOTE(review): the instructions that convert the compare result
 * into the r3 return value are not visible in this chunk.
 */
241 _GLOBAL(atomic_dec_and_test)
242 10: lwarx r5,0,r3 /* Fetch old value & reserve */
243 subi r5,r5,1 /* Decrement by one (comment said 'add') */
244 stwcx. r5,0,r3 /* Update with new value */
245 bne- 10b /* Retry if "reservation" (i.e. lock) lost */
246 cmpi 0,r5,0 /* Return 'true' IFF 0 */
251 _GLOBAL(atomic_clear_mask)
257 _GLOBAL(atomic_set_mask)
265 * I/O string operations
267 * insb(port, buf, len)
268 * outsb(port, buf, len)
269 * insw(port, buf, len)
270 * outsw(port, buf, len)
271 * insl(port, buf, len)
272 * outsl(port, buf, len)
273 * insw_ns(port, buf, len)
274 * outsw_ns(port, buf, len)
275 * insl_ns(port, buf, len)
276 * outsl_ns(port, buf, len)
278 * The *_ns versions don't do byte-swapping.
373 * Extended precision shifts
375 * R3/R4 has 64 bit value
379 * ashrdi3: XXXYYY/ZZZAAA -> SSSXXX/YYYZZZ
380 * ashldi3: XXXYYY/ZZZAAA -> YYYZZZ/AAA000
/*
 * Fragment of ashrdi3: 64-bit arithmetic shift right of the value
 * held in r3 (high word) / r4 (low word) -- see the XXXYYY/ZZZAAA
 * diagram above.
 * NOTE(review): the setup of the shift counts in r5/r6 and the
 * routine's label and blr are not visible in this chunk.
 */
385 slw r7,r3,r6 /* isolate YYY */
386 srw r4,r4,r5 /* isolate ZZZ */
387 or r4,r4,r7 /* YYYZZZ */
388 sraw r3,r3,r5 /* SSSXXX */
/*
 * Fragment of ashldi3: 64-bit shift left of the value held in r3
 * (high word) / r4 (low word) -- see the diagram above.
 * NOTE(review): shift-count setup and the routine's label and blr
 * are not visible in this chunk.
 */
394 srw r7,r4,r6 /* isolate ZZZ */
395 slw r4,r4,r5 /* AAA000 */
396 slw r3,r3,r5 /* YYY--- */
397 or r3,r3,r7 /* YYYZZZ */
407 mr r3,r1 /* Close enough */
439 Copyright © 1997-1998 by PowerLogix R & D, Inc.
441 This program is free software; you can redistribute it and/or modify
442 it under the terms of the GNU General Public License as published by
443 the Free Software Foundation; either version 2 of the License, or
444 (at your option) any later version.
446 This program is distributed in the hope that it will be useful,
447 but WITHOUT ANY WARRANTY; without even the implied warranty of
448 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
449 GNU General Public License for more details.
451 You should have received a copy of the GNU General Public License
452 along with this program; if not, write to the Free Software
453 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
457 - First public release, contributed by PowerLogix.
459 Author: Terry Greeniaus (tgree@phys.ualberta.ca)
460 Please e-mail updates to this file to me, thanks!
466 When setting the L2CR register, you must do a few special things. If you are enabling the
467 cache, you must perform a global invalidate. If you are disabling the cache, you must
468 flush the cache contents first. This routine takes care of doing these things. When first
469 enabling the cache, make sure you pass in the L2CR you want, as well as passing in the
470 global invalidate bit set. A global invalidate will only be performed if the L2I bit is set
471 in applyThis. When enabling the cache, you should also set the L2E bit in applyThis. If you
472 want to modify the L2CR contents after the cache has been enabled, the recommended
473 procedure is to first call _setL2CR(0) to disable the cache and then call it again with
474 the new values for L2CR. Examples:
476 _setL2CR(0) - disables the cache
477 _setL2CR(0xB3A04000) - enables my G3 upgrade card:
478 - L2E set to turn on the cache
481 - L2RAM set to pipelined synchronous late-write
482 - L2I set to perform a global invalidation
484 - L2DF set because this upgrade card requires it
486 A similar call should work for your card. You need to know the correct setting for your
487 card and then place them in the fields I have outlined above. Other fields support optional
488 features, such as L2DO which caches only data, or L2TS which causes cache pushes from
489 the L1 cache to go to the L2 cache instead of to main memory.
/*
 * Fragment of the PowerLogix _set_L2CR(applyThis) routine for the
 * MPC750 (G3): per the description above, it flushes and disables
 * the L2 before changing L2CR, optionally performs a global
 * invalidate (L2I), then re-enables the cache if requested.
 * NOTE(review): many lines are missing from this view (the PVR
 * check, mfspr/mtspr of L2CR, the read/flush loop bounds, the
 * dontDisableCache path) -- consult the full file before editing.
 */
492 /* Make sure this is a 750 chip */
494 rlwinm r4,r4,16,16,31
501 /* Get the current enable bit of the L2CR into r4 */
505 /* See if we want to perform a global inval this time. */
506 rlwinm r6,r3,0,10,10 /* r6 contains the new invalidate bit */
507 rlwinm. r5,r3,0,0,0 /* r5 contains the new enable bit */
508 rlwinm r3,r3,0,11,9 /* Turn off the invalidate bit */
509 rlwinm r3,r3,0,1,31 /* Turn off the enable bit */
510 or r3,r3,r4 /* Keep the enable bit the same as it was for now. */
511 bne dontDisableCache /* Only disable the cache if L2CRApply has the enable bit off */
514 /* Disable the cache. First, we turn off data relocation. */
516 rlwinm r4,r7,0,28,26 /* Turn off DR bit */
517 rlwinm r4,r4,0,17,15 /* Turn off EE bit - an external exception while we are flushing
518 the cache is fatal (comment this line and see!) */
524 Now, read the first 2MB of memory to put new data in the cache.
525 (Actually we only need the size of the L2 cache plus
526 the size of the L1 cache, but 2MB will cover everything just to be safe).
533 addi r4,r4,0x0020 /* Go to start of next cache line */
536 /* Now, flush the first 2MB of memory */
543 addi r4,r4,0x0020 /* Go to start of next cache line */
546 /* Turn off the L2CR enable bit. */
549 /* Reenable data relocation. */
555 /* Set up the L2CR configuration bits */
562 /* Perform a global invalidation */
567 invalCompleteLoop: /* Wait for the invalidation to complete */
569 rlwinm. r4,r3,0,31,31
570 bne invalCompleteLoop
572 rlwinm r3,r3,0,11,9; /* Turn off the L2I bit */
578 /* See if we need to enable the cache */
583 /* Enable the cache */
/*
 * Fragment of _get_L2CR: checks that the CPU is a 750 (the rlwinm
 * extracts the upper halfword -- presumably of the PVR; verify
 * against the full file) and returns the L2CR contents in r3.
 * NOTE(review): the mfspr instructions are not visible here.
 */
590 /* Make sure this is a 750 chip */
592 rlwinm r3,r3,16,16,31
597 /* Return the L2CR contents */
601 /* --- End of PowerLogix code ---
616 * These are used in the alignment trap handler when emulating
617 * single-precision loads and stores.
618 * We restore and save the fpscr so the task gets the same result
619 * and exceptions as if the cpu had performed the load or store.
/* NOTE(review): only fragments of the two emulation helpers are
 * visible below.  Each loads the saved fpscr image (at r5-4) into
 * fr0 before the FP operation and captures the updated fpscr with
 * mffs afterwards; the surrounding labels, mtfsf and blr lines are
 * in the missing lines -- confirm against the full file. */
623 lfd 0,-4(r5) /* load up fpscr value */
627 mffs 0 /* save new fpscr value */
633 lfd 0,-4(r5) /* load up fpscr value */
637 mffs 0 /* save new fpscr value */
/*
 * __clear_msr_me -- clear bits (MSR_ME, per the name) from the MSR.
 * NOTE(review): the instruction(s) that build the mask in r3 are
 * not visible in this chunk.
 */
641 .globl __clear_msr_me
643 mfmsr r0 /* Get current interrupt state */
646 andc r0,r0,r3 /* Clear the bits given by the mask in r3 (comment said r4) */
647 sync /* Some chip revs have problems here */
648 mtmsr r0 /* Update machine state */
652 * Create a kernel thread
653 * kernel_thread(fn, arg, flags)
/*
 * kernel_thread(fn, arg, flags)
 * Create a kernel thread via clone(flags | CLONE_VM).  The parent
 * returns; the child clears its user-register pointer, then calls
 * fn(arg) and exits.
 * NOTE(review): the 'sc' system-call instructions and the clone
 * syscall-number load are in lines not visible in this chunk.
 */
655 _GLOBAL(kernel_thread)
656 mr r6,r3 /* function */
657 ori r3,r5,CLONE_VM /* flags */
660 cmpi 0,r3,0 /* parent or child? */
661 bnelr /* return if parent */
662 li r0,0 /* clear out p->tss.regs */
663 stw r0,TSS+PT_REGS(r2) /* since we don't have user ctx */
664 mtlr r6 /* fn addr in lr */
665 mr r3,r4 /* load arg and call fn */
667 li r0,__NR_exit /* exit after child exits */
/*
 * SYSCALL(name) -- libc-style system-call stub generator: invokes
 * the kernel and, on error, stores the error code to errno (the
 * visible stw line).
 * NOTE(review): most of the macro body is not visible in this
 * chunk -- confirm against the full file before editing.
 */
671 #define SYSCALL(name) \
677 stw r3,errno@l(r4); \
681 #define __NR__exit __NR_exit
693 SYSCALL(delete_module)
698 /* Why isn't this a) automatic, b) written in 'C'? */
/*
 * sys_call_table -- table of syscall handler addresses indexed by
 * syscall number; sys_ni_syscall fills unimplemented/reserved
 * slots.  The trailing .space pads the table out to NR_syscalls
 * entries.
 * NOTE(review): many entries are missing from this view (the
 * numbered comments jump); do not renumber or reorder entries based
 * on this chunk alone.
 */
701 .globl sys_call_table
703 .long sys_ni_syscall /* 0 - old "setup()" system call */
708 .long sys_open /* 5 */
713 .long sys_unlink /* 10 */
718 .long sys_chmod /* 15 */
720 .long sys_ni_syscall /* old break syscall holder */
723 .long sys_getpid /* 20 */
728 .long sys_stime /* 25 */
733 .long sys_utime /* 30 */
734 .long sys_ni_syscall /* old stty syscall holder */
735 .long sys_ni_syscall /* old gtty syscall holder */
738 .long sys_ni_syscall /* 35 */ /* old ftime syscall holder */
743 .long sys_rmdir /* 40 */
747 .long sys_ni_syscall /* old prof syscall holder */
748 .long sys_brk /* 45 */
753 .long sys_getegid /* 50 */
755 .long sys_umount /* recycled never used phys() */
756 .long sys_ni_syscall /* old lock syscall holder */
758 .long sys_fcntl /* 55 */
759 .long sys_ni_syscall /* old mpx syscall holder */
761 .long sys_ni_syscall /* old ulimit syscall holder */
763 .long sys_umask /* 60 */
768 .long sys_getpgrp /* 65 */
773 .long sys_setreuid /* 70 */
777 .long sys_sethostname
778 .long sys_setrlimit /* 75 */
781 .long sys_gettimeofday
782 .long sys_settimeofday
783 .long sys_getgroups /* 80 */
788 .long sys_readlink /* 85 */
793 .long sys_mmap /* 90 */
798 .long sys_fchown /* 95 */
799 .long sys_getpriority
800 .long sys_setpriority
801 .long sys_ni_syscall /* old profil syscall holder */
803 .long sys_fstatfs /* 100 */
808 .long sys_getitimer /* 105 */
813 .long sys_iopl /* 110 */
818 .long sys_swapoff /* 115 */
823 .long sys_clone /* 120 */
824 .long sys_setdomainname
828 .long sys_mprotect /* 125 */
829 .long sys_sigprocmask
830 .long sys_create_module
831 .long sys_init_module
832 .long sys_delete_module
833 .long sys_get_kernel_syms /* 130 */
838 .long sys_sysfs /* 135 */
839 .long sys_personality
840 .long sys_ni_syscall /* for afs_syscall */
843 .long sys_llseek /* 140 */
848 .long sys_readv /* 145 */
853 .long sys_mlock /* 150 */
857 .long sys_sched_setparam
858 .long sys_sched_getparam /* 155 */
859 .long sys_sched_setscheduler
860 .long sys_sched_getscheduler
861 .long sys_sched_yield
862 .long sys_sched_get_priority_max
863 .long sys_sched_get_priority_min /* 160 */
864 .long sys_sched_rr_get_interval
868 .long sys_getresuid /* 165 */
869 .long sys_query_module
877 .long sys_getresgid /* 170 */
879 .long sys_rt_sigreturn
880 .long sys_rt_sigaction
881 .long sys_rt_sigprocmask
882 .long sys_rt_sigpending /* 175 */
883 .long sys_rt_sigtimedwait
884 .long sys_rt_sigqueueinfo
885 .long sys_rt_sigsuspend
887 .long sys_pwrite /* 180 */
892 .long sys_sigaltstack /* 185 */
894 .long sys_ni_syscall /* streams1 */
895 .long sys_ni_syscall /* streams2 */
897 .space (NR_syscalls-183)*4