2 * This file contains miscellaneous low-level functions.
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
5 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
15 #include <linux/config.h>
16 #include <linux/sys.h>
17 #include <asm/unistd.h>
18 #include <asm/errno.h>
19 #include <asm/processor.h>
/* Log2 of the data-cache line size in bytes: 32-byte lines normally,
   16-byte lines in the CONFIG_8xx case.  NOTE(review): the matching
   #ifdef/#else lines are not visible in this extract. */
25 LG_CACHE_LINE_SIZE = 5
28 LG_CACHE_LINE_SIZE = 4
29 #endif /* CONFIG_8xx */
34 * Returns (address we're running at) - (address we were linked at)
35 * for use before the text and data are mapped to KERNELBASE.
/*
 * NOTE(review): body of an interrupt-disable routine; its entry label
 * and trailing blr are elided in this extract.  Extracts the previous
 * MSR[EE] state into r3, then clears EE in the MSR.
 */
49 mfmsr r0 /* Get current interrupt state */
50 rlwinm r3,r0,16+1,32-1,31 /* Extract old value of 'EE' */
51 li r4,0 /* Need [unsigned] value of MSR_EE */
52 ori r4,r4,MSR_EE /* r4 = mask with only the EE bit set */
53 andc r0,r0,r4 /* Clear the EE bit (mask in r4) */
54 sync /* Some chip revs have problems here... */
55 mtmsr r0 /* Update machine state */
/*
 * NOTE(review): body of an interrupt-enable routine (entry label
 * elided).  If ppc_n_lost_interrupts is non-zero, branch to
 * do_lost_interrupts to replay those first (with the new MSR value,
 * EE set, in r3); otherwise just set MSR[EE] directly.
 */
59 lis r4,ppc_n_lost_interrupts@ha
60 lwz r4,ppc_n_lost_interrupts@l(r4)
61 mfmsr r3 /* Get current state */
62 ori r3,r3,MSR_EE /* Turn on 'EE' bit */
63 cmpi 0,r4,0 /* lost interrupts to process first? */
64 bne- do_lost_interrupts
65 sync /* Some chip revs have problems here... */
66 mtmsr r3 /* Update machine state */
70 * We were about to enable interrupts but we have to simulate
71 * some interrupts that were lost by enable_irq first.
/*
 * do_lost_interrupts: replay interrupts that were lost while enabled
 * interrupts were being simulated (see the comment above).
 * NOTE(review): only the entry and the reload of
 * ppc_n_lost_interrupts are visible in this extract; the replay loop
 * body is elided.
 */
73 .globl do_lost_interrupts
80 lis r4,ppc_n_lost_interrupts@ha
81 lwz r4,ppc_n_lost_interrupts@l(r4)
94 * complement mask on the msr then "or" some values on.
95 * _nmask_and_or_msr(nmask, value_to_or)
97 _GLOBAL(_nmask_and_or_msr)
/* MSR = (MSR & ~r3) | r4 — clear the complement-mask bits (first
   parameter), then OR in the value bits (second parameter).
   NOTE(review): the trailing blr is not visible in this extract. */
98 mfmsr r0 /* Get current msr */
99 andc r0,r0,r3 /* And off the bits set in r3 (first parm) */
100 or r0,r0,r4 /* Or on the bits in r4 (second parm) */
101 sync /* Some chip revs have problems here... */
102 mtmsr r0 /* Update machine state */
120 * Flush MMU TLB for a particular address
132 * Flush instruction cache.
133 * This is a no-op on the 601.
135 _GLOBAL(flush_instruction_cache)
/* NOTE(review): several lines are elided here.  The visible rlwinm
   shifts the high halfword of r3 down — presumably the processor
   version from a preceding (elided) mfspr PVR — and the routine
   returns early on a 601, whose unified cache needs no i-cache
   flush. */
137 rlwinm r3,r3,16,16,31
139 beqlr /* for 601, do nothing */
140 /* 603/604 processor - use invalidate-all bit in HID0 */
148 * Write any modified data cache blocks out to memory
149 * and invalidate the corresponding instruction cache blocks.
150 * This is a no-op on the 601.
152 * flush_icache_range(unsigned long start, unsigned long stop)
154 _GLOBAL(flush_icache_range)
/*
 * flush_icache_range(start r3, stop r4): write dirty d-cache lines in
 * the range back to memory (dcbst, per the sync comment below) and
 * invalidate the matching i-cache lines; no-op on the 601.
 * NOTE(review): the loop bodies and several setup lines are elided in
 * this extract — only the skeleton is visible.
 */
156 rlwinm r5,r5,16,16,31
158 beqlr /* for 601, do nothing */
159 li r5,CACHE_LINE_SIZE-1
163 srwi. r4,r4,LG_CACHE_LINE_SIZE
168 addi r3,r3,CACHE_LINE_SIZE
170 sync /* wait for dcbst's to get to ram */
173 addi r6,r6,CACHE_LINE_SIZE
180 * Like above, but only do the D-cache.
182 * flush_dcache_range(unsigned long start, unsigned long stop)
184 _GLOBAL(flush_dcache_range)
/*
 * flush_dcache_range(start r3, stop r4): like flush_icache_range but
 * touches only the data cache.  NOTE(review): the dcbst loop body and
 * trailing blr are elided in this extract.
 */
185 li r5,CACHE_LINE_SIZE-1
189 srwi. r4,r4,LG_CACHE_LINE_SIZE
194 addi r3,r3,CACHE_LINE_SIZE
196 sync /* wait for dcbst's to get to ram */
200 * Flush a particular page from the DATA cache
201 * Note: this is necessary because the instruction cache does *not*
202 * snoop from the data cache.
203 * This is a no-op on the 601 which has a unified cache.
205 * void flush_page_to_ram(void *page)
207 _GLOBAL(flush_page_to_ram)
/*
 * flush_page_to_ram(page r3): push one page of data cache out to
 * memory so the non-snooping instruction cache cannot see stale
 * data; no-op on the 601 (unified cache).  NOTE(review): parts of the
 * two per-line loops are elided in this extract.
 */
209 rlwinm r5,r5,16,16,31
211 beqlr /* for 601, do nothing */
213 andc r3,r3,r4 /* Get page base address */
214 li r4,4096/CACHE_LINE_SIZE /* Number of lines in a page */
217 0: dcbst 0,r3 /* Write line to ram */
218 addi r3,r3,CACHE_LINE_SIZE
223 addi r6,r6,CACHE_LINE_SIZE
230 * Clear a page using the dcbz instruction, which doesn't cause any
231 * memory traffic (except to write out any cache lines which get
232 * displaced). This only works on cacheable memory.
/* NOTE(review): interior of the dcbz-based page-clear loop described
   in the comment above; the entry label, the dcbz itself, and the
   loop branch are elided in this extract.  r0 = cache lines per
   4096-byte page. */
235 li r0,4096/CACHE_LINE_SIZE
238 addi r3,r3,CACHE_LINE_SIZE
243 * Atomic [test&set] exchange
245 * unsigned long xchg_u32(void *ptr, unsigned long val)
246 * Changes the memory location '*ptr' to be val and returns
247 * the previous value stored there.
/* xchg_u32 body (entry label elided): atomically exchange *ptr (r5,
   saved from r3) with val (r4) via a lwarx/stwcx. reservation loop;
   the old value ends up in r3 as the return value. */
250 mr r5,r3 /* Save pointer */
251 10: lwarx r3,0,r5 /* Fetch old value & reserve */
252 stwcx. r4,0,r5 /* Update with new value */
253 bne- 10b /* Retry if "reservation" (i.e. lock) lost */
257 * Try to acquire a spinlock.
258 * Only does the stwcx. if the load returned 0 - the Programming
259 * Environments Manual suggests not doing unnecessary stwcx.'s
260 * since they may inhibit forward progress by other CPUs in getting
263 _GLOBAL(__spin_trylock)
/*
 * Single attempt to take a spinlock.  Per the header comment above,
 * the stwcx. is only issued when the loaded value was 0.
 * NOTE(review): the lines that set up r4 (lock address) and r5 (new
 * value), and the success-return path (r3 = 0), are elided in this
 * extract; only the failure path "1:" returning 1 is visible.
 */
265 eieio /* prevent reordering of stores */
267 lwarx r3,0,r4 /* fetch old value, establish reservation */
268 cmpwi 0,r3,0 /* is it 0? */
269 bnelr- /* return failure if not */
270 stwcx. r5,0,r4 /* try to update with new value */
271 bne- 1f /* if we failed */
272 eieio /* prevent reordering of stores */
274 1: li r3,1 /* return non-zero for failure */
278 * Atomic add/sub/inc/dec operations
280 * void atomic_add(int c, int *v)
281 * void atomic_sub(int c, int *v)
282 * void atomic_inc(int *v)
283 * void atomic_dec(int *v)
284 * int atomic_dec_and_test(int *v)
285 * int atomic_inc_return(int *v)
286 * int atomic_dec_return(int *v)
287 * void atomic_clear_mask(atomic_t mask, atomic_t *addr)
288 * void atomic_set_mask(atomic_t mask, atomic_t *addr);
/* atomic_add body (entry label elided): *v (r4) += c (r3),
   retrying the lwarx/stwcx. pair until the reservation holds. */
291 10: lwarx r5,0,r4 /* Fetch old value & reserve */
292 add r5,r5,r3 /* Perform 'add' operation */
293 stwcx. r5,0,r4 /* Update with new value */
294 bne- 10b /* Retry if "reservation" (i.e. lock) lost */
296 _GLOBAL(atomic_add_return)
/* Atomically *v (r4) += c (r3) and return the new value.
   NOTE(review): the "mr r3,r5 / blr" return sequence is elided in
   this extract. */
297 10: lwarx r5,0,r4 /* Fetch old value & reserve */
298 add r5,r5,r3 /* Perform 'add' operation */
299 stwcx. r5,0,r4 /* Update with new value */
300 bne- 10b /* Retry if "reservation" (i.e. lock) lost */
/* atomic_sub body (entry label elided): *v (r4) -= c (r3). */
304 10: lwarx r5,0,r4 /* Fetch old value & reserve */
305 sub r5,r5,r3 /* Perform 'subtract' operation */
306 stwcx. r5,0,r4 /* Update with new value */
307 bne- 10b /* Retry if "reservation" (i.e. lock) lost */
/* atomic_inc body (entry label elided): (*v)++ with v in r3. */
310 10: lwarx r5,0,r3 /* Fetch old value & reserve */
311 addi r5,r5,1 /* Increment */
312 stwcx. r5,0,r3 /* Update with new value */
313 bne- 10b /* Retry if "reservation" (i.e. lock) lost */
315 _GLOBAL(atomic_inc_return)
/* Atomically increment *v (r3) and return the new value.
   NOTE(review): the trailing blr is not visible in this extract. */
316 10: lwarx r5,0,r3 /* Fetch old value & reserve */
317 addi r5,r5,1 /* Increment */
318 stwcx. r5,0,r3 /* Update with new value */
319 bne- 10b /* Retry if "reservation" (i.e. lock) lost */
320 mr r3,r5 /* Return new value */
/* atomic_dec body (entry label elided): (*v)-- with v in r3. */
323 10: lwarx r5,0,r3 /* Fetch old value & reserve */
324 subi r5,r5,1 /* Decrement */
325 stwcx. r5,0,r3 /* Update with new value */
326 bne- 10b /* Retry if "reservation" (i.e. lock) lost */
328 _GLOBAL(atomic_dec_return)
/* Atomically decrement *v (r3) and return the new value.
   NOTE(review): the trailing blr is not visible in this extract. */
329 10: lwarx r5,0,r3 /* Fetch old value & reserve */
330 subi r5,r5,1 /* Decrement */
331 stwcx. r5,0,r3 /* Update with new value */
332 bne- 10b /* Retry if "reservation" (i.e. lock) lost */
333 mr r3,r5 /* Return new value */
335 _GLOBAL(atomic_dec_and_test)
/* Atomically decrement *v (r3); result is 'true' iff the new value
   is zero.  NOTE(review): the lines that materialize the boolean in
   r3 after the cmpi are elided in this extract. */
336 10: lwarx r5,0,r3 /* Fetch old value & reserve */
337 subi r5,r5,1 /* Decrement */
338 stwcx. r5,0,r3 /* Update with new value */
339 bne- 10b /* Retry if "reservation" (i.e. lock) lost */
340 cmpi 0,r5,0 /* Return 'true' IFF 0 */
/* NOTE(review): only the entry labels of these two mask operations
   are visible; both bodies are elided in this extract. */
345 _GLOBAL(atomic_clear_mask)
351 _GLOBAL(atomic_set_mask)
359 * I/O string operations
361 * insb(port, buf, len)
362 * outsb(port, buf, len)
363 * insw(port, buf, len)
364 * outsw(port, buf, len)
365 * insl(port, buf, len)
366 * outsl(port, buf, len)
367 * insw_ns(port, buf, len)
368 * outsw_ns(port, buf, len)
369 * insl_ns(port, buf, len)
370 * outsl_ns(port, buf, len)
372 * The *_ns versions don't do byte-swapping.
467 * Extended precision shifts
469 * R3/R4 has 64 bit value
473 * ashrdi3: XXXYYY/ZZZAAA -> SSSXXX/YYYZZZ
474 * ashldi3: XXXYYY/ZZZAAA -> YYYZZZ/AAA000
/* __ashrdi3 body fragment (entry label and the r5/r6 shift-count
   setup are elided): 64-bit arithmetic shift right of the value in
   r3:r4, per the XXXYYY/ZZZAAA diagram above. */
479 slw r7,r3,r6 /* isolate YYY */
480 srw r4,r4,r5 /* isolate ZZZ */
481 or r4,r4,r7 /* YYYZZZ */
482 sraw r3,r3,r5 /* SSSXXX */
/* __ashldi3 body fragment (entry label and the r5/r6 shift-count
   setup are elided): 64-bit shift left of the value in r3:r4. */
488 srw r7,r4,r6 /* isolate ZZZ */
489 slw r4,r4,r5 /* AAA000 */
490 slw r3,r3,r5 /* YYY--- */
491 or r3,r3,r7 /* YYYZZZ */
501 mr r3,r1 /* Close enough */
533 Copyright © 1997-1998 by PowerLogix R & D, Inc.
535 This program is free software; you can redistribute it and/or modify
536 it under the terms of the GNU General Public License as published by
537 the Free Software Foundation; either version 2 of the License, or
538 (at your option) any later version.
540 This program is distributed in the hope that it will be useful,
541 but WITHOUT ANY WARRANTY; without even the implied warranty of
542 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
543 GNU General Public License for more details.
545 You should have received a copy of the GNU General Public License
546 along with this program; if not, write to the Free Software
547 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
551 - First public release, contributed by PowerLogix.
553 Author: Terry Greeniaus (tgree@phys.ualberta.ca)
554 Please e-mail updates to this file to me, thanks!
560 When setting the L2CR register, you must do a few special things. If you are enabling the
561 cache, you must perform a global invalidate. If you are disabling the cache, you must
562 flush the cache contents first. This routine takes care of doing these things. When first
563 enabling the cache, make sure you pass in the L2CR you want, as well as passing in the
564 global invalidate bit set. A global invalidate will only be performed if the L2I bit is set
565 in applyThis. When enabling the cache, you should also set the L2E bit in applyThis. If you
566 want to modify the L2CR contents after the cache has been enabled, the recommended
567 procedure is to first call __setL2CR(0) to disable the cache and then call it again with
568 the new values for L2CR. Examples:
570 _setL2CR(0) - disables the cache
571 _setL2CR(0xB3A04000) - enables my G3 upgrade card:
572 - L2E set to turn on the cache
575 - L2RAM set to pipelined synchronous late-write
576 - L2I set to perform a global invalidation
578 - L2DF set because this upgrade card requires it
580 A similar call should work for your card. You need to know the correct setting for your
581 card and then place them in the fields I have outlined above. Other fields support optional
582 features, such as L2DO which caches only data, or L2TS which causes cache pushes from
583 the L1 cache to go to the L2 cache instead of to main memory.
/*
 * _setL2CR body (entry label elided): apply a new L2CR value per the
 * PowerLogix procedure documented above — flush-then-disable when
 * turning the cache off, optional global invalidate (L2I), then
 * enable.  NOTE(review): this extract is heavily elided; most
 * instructions between the visible comments (PVR check, DR/EE
 * save/restore, the 2MB touch/flush loops, mtspr of L2CR) are
 * missing.  Do not treat the visible lines as a complete routine.
 */
586 /* Make sure this is a 750 chip */
588 rlwinm r4,r4,16,16,31
595 /* Get the current enable bit of the L2CR into r4 */
599 /* See if we want to perform a global inval this time. */
600 rlwinm r6,r3,0,10,10 /* r6 contains the new invalidate bit */
601 rlwinm. r5,r3,0,0,0 /* r5 contains the new enable bit */
602 rlwinm r3,r3,0,11,9 /* Turn off the invalidate bit */
603 rlwinm r3,r3,0,1,31 /* Turn off the enable bit */
604 or r3,r3,r4 /* Keep the enable bit the same as it was for now. */
605 bne dontDisableCache /* Only disable the cache if L2CRApply has the enable bit off */
608 /* Disable the cache. First, we turn off data relocation. */
610 rlwinm r4,r7,0,28,26 /* Turn off DR bit */
611 rlwinm r4,r4,0,17,15 /* Turn off EE bit - an external exception while we are flushing
612 the cache is fatal (comment this line and see!) */
618 Now, read the first 2MB of memory to put new data in the cache.
619 (Actually we only need the size of the L2 cache plus
620 the size of the L1 cache, but 2MB will cover everything just to be safe).
627 addi r4,r4,0x0020 /* Go to start of next cache line */
630 /* Now, flush the first 2MB of memory */
637 addi r4,r4,0x0020 /* Go to start of next cache line */
640 /* Turn off the L2CR enable bit. */
643 /* Reenable data relocation. */
649 /* Set up the L2CR configuration bits */
656 /* Perform a global invalidation */
661 invalCompleteLoop: /* Wait for the invalidation to complete */
663 rlwinm. r4,r3,0,31,31
664 bne invalCompleteLoop
666 rlwinm r3,r3,0,11,9; /* Turn off the L2I bit */
672 /* See if we need to enable the cache */
677 /* Enable the cache */
/* _getL2CR body fragment (entry label elided): checks for a 750
   (PVR high half in r3) and returns the L2CR contents; the mfspr
   itself and the blr are not visible in this extract. */
684 /* Make sure this is a 750 chip */
686 rlwinm r3,r3,16,16,31
691 /* Return the L2CR contents */
695 /* --- End of PowerLogix code ---
710 * These are used in the alignment trap handler when emulating
711 * single-precision loads and stores.
712 * We restore and save the fpscr so the task gets the same result
713 * and exceptions as if the cpu had performed the load or store.
/* NOTE(review): fragment of a single-precision load-emulation helper
   used by the alignment trap handler (entry label elided).  Restores
   the task's saved fpscr before the access and re-saves it after, so
   the task sees the same FP result/exceptions as real hardware. */
717 lfd 0,-4(r5) /* load up fpscr value */
721 mffs 0 /* save new fpscr value */
/* NOTE(review): fragment of the matching single-precision
   store-emulation helper (entry label elided); same fpscr
   restore/save pattern as the load case above. */
727 lfd 0,-4(r5) /* load up fpscr value */
731 mffs 0 /* save new fpscr value */
/*
 * __clear_msr_me: clear bits in the MSR — per the name, the
 * machine-check-enable bit.  NOTE(review): the lines loading the
 * MSR_ME mask into r3, and the trailing blr, are elided in this
 * extract.
 */
735 .globl __clear_msr_me
737 mfmsr r0 /* Get current interrupt state */
740 andc r0,r0,r3 /* Clear the bits set in r3 */
741 sync /* Some chip revs have problems here */
742 mtmsr r0 /* Update machine state */
746 * Create a kernel thread
747 * kernel_thread(fn, arg, flags)
749 _GLOBAL(kernel_thread)
/*
 * kernel_thread(fn r3, arg r4, flags r5): create a kernel thread via
 * the clone syscall with CLONE_VM.  The parent returns immediately
 * (bnelr); the child clears thread.regs, then calls fn(arg) and
 * invokes exit when fn returns.  NOTE(review): the sc instructions
 * and the clone argument setup between these lines are elided in
 * this extract.
 */
750 mr r6,r3 /* function */
751 ori r3,r5,CLONE_VM /* flags */
754 cmpi 0,r3,0 /* parent or child? */
755 bnelr /* return if parent */
756 li r0,0 /* clear out p->thread.regs */
757 stw r0,THREAD+PT_REGS(r2) /* since we don't have user ctx */
758 mtlr r6 /* fn addr in lr */
759 mr r3,r4 /* load arg and call fn */
761 li r0,__NR_exit /* exit after child exits */
766 * This routine is just here to keep GCC happy - sigh...
/* NOTE(review): fragment of the SYSCALL() macro that generates
   libc-style syscall stubs (the visible stw stores the return value
   into errno on the failure path).  Most macro lines and the other
   SYSCALL() instantiations are elided in this extract. */
771 #define SYSCALL(name) \
777 stw r3,errno@l(r4); \
781 #define __NR__exit __NR_exit
792 SYSCALL(delete_module)
797 /* Why isn't this a) automatic, b) written in 'C'? */
/*
 * System call dispatch table, indexed by syscall number; each slot is
 * a 32-bit handler address.  The /* N */ comments mark every fifth
 * slot.  NOTE(review): many entries are elided in this extract, so
 * consecutive visible lines are not necessarily consecutive slots.
 * The final .space pads the table out to NR_syscalls entries.
 */
800 .globl sys_call_table
802 .long sys_ni_syscall /* 0 - old "setup()" system call */
807 .long sys_open /* 5 */
812 .long sys_unlink /* 10 */
817 .long sys_chmod /* 15 */
819 .long sys_ni_syscall /* old break syscall holder */
822 .long sys_getpid /* 20 */
827 .long sys_stime /* 25 */
832 .long sys_utime /* 30 */
833 .long sys_ni_syscall /* old stty syscall holder */
834 .long sys_ni_syscall /* old gtty syscall holder */
837 .long sys_ni_syscall /* 35 */ /* old ftime syscall holder */
842 .long sys_rmdir /* 40 */
846 .long sys_ni_syscall /* old prof syscall holder */
847 .long sys_brk /* 45 */
852 .long sys_getegid /* 50 */
854 .long sys_umount /* recycled never used phys() */
855 .long sys_ni_syscall /* old lock syscall holder */
857 .long sys_fcntl /* 55 */
858 .long sys_ni_syscall /* old mpx syscall holder */
860 .long sys_ni_syscall /* old ulimit syscall holder */
862 .long sys_umask /* 60 */
867 .long sys_getpgrp /* 65 */
872 .long sys_setreuid /* 70 */
876 .long sys_sethostname
877 .long sys_setrlimit /* 75 */
880 .long sys_gettimeofday
881 .long sys_settimeofday
882 .long sys_getgroups /* 80 */
887 .long sys_readlink /* 85 */
892 .long sys_mmap /* 90 */
897 .long sys_fchown /* 95 */
898 .long sys_getpriority
899 .long sys_setpriority
900 .long sys_ni_syscall /* old profil syscall holder */
902 .long sys_fstatfs /* 100 */
907 .long sys_getitimer /* 105 */
912 .long sys_iopl /* 110 */
914 .long sys_ni_syscall /* old 'idle' syscall */
917 .long sys_swapoff /* 115 */
922 .long sys_clone /* 120 */
923 .long sys_setdomainname
927 .long sys_mprotect /* 125 */
928 .long sys_sigprocmask
929 .long sys_create_module
930 .long sys_init_module
931 .long sys_delete_module
932 .long sys_get_kernel_syms /* 130 */
937 .long sys_sysfs /* 135 */
938 .long sys_personality
939 .long sys_ni_syscall /* for afs_syscall */
942 .long sys_llseek /* 140 */
947 .long sys_readv /* 145 */
952 .long sys_mlock /* 150 */
956 .long sys_sched_setparam
957 .long sys_sched_getparam /* 155 */
958 .long sys_sched_setscheduler
959 .long sys_sched_getscheduler
960 .long sys_sched_yield
961 .long sys_sched_get_priority_max
962 .long sys_sched_get_priority_min /* 160 */
963 .long sys_sched_rr_get_interval
967 .long sys_getresuid /* 165 */
968 .long sys_query_module
976 .long sys_getresgid /* 170 */
978 .long sys_rt_sigreturn
979 .long sys_rt_sigaction
980 .long sys_rt_sigprocmask
981 .long sys_rt_sigpending /* 175 */
982 .long sys_rt_sigtimedwait
983 .long sys_rt_sigqueueinfo
984 .long sys_rt_sigsuspend
986 .long sys_pwrite /* 180 */
991 .long sys_sigaltstack /* 185 */
993 .long sys_ni_syscall /* streams1 */
994 .long sys_ni_syscall /* streams2 */
996 .space (NR_syscalls-183)*4