#ifndef __ALPHA_SYSTEM_H
#define __ALPHA_SYSTEM_H

#include <asm/pal.h>
#include <asm/page.h>
#include <asm/barrier.h>

/*
 * System defines.  Note that this is included both from .c and .S
 * files, so it only does defines, not any C code.
 */

/*
 * We leave one page for the initial stack page, and one page for
 * the initial process structure.  Also, the console eats 3 MB for
 * the initial bootloader (one of which we can reclaim later).
 */
#define BOOT_PCB	0x20000000
#define BOOT_ADDR	0x20000000
/* Remove when official MILO sources have ELF support: */
#define BOOT_SIZE	(16*1024)

#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
#define KERNEL_START_PHYS	0x300000  /* Old bootloaders hardcoded this.  */
#else
#define KERNEL_START_PHYS	0x1000000 /* Required by Wildfire/Titan/Marvel. */
#endif

#define KERNEL_START	(PAGE_OFFSET+KERNEL_START_PHYS)
#define SWAPPER_PGD	KERNEL_START
#define INIT_STACK	(PAGE_OFFSET+KERNEL_START_PHYS+0x02000)
#define EMPTY_PGT	(PAGE_OFFSET+KERNEL_START_PHYS+0x04000)
#define EMPTY_PGE	(PAGE_OFFSET+KERNEL_START_PHYS+0x08000)
#define ZERO_PGE	(PAGE_OFFSET+KERNEL_START_PHYS+0x0A000)

#define START_ADDR	(PAGE_OFFSET+KERNEL_START_PHYS+0x10000)

/*
 * This is set up by the secondary bootstrap loader.  Because
 * the zero page is zeroed out as soon as the vm system is
 * initialized, we need to copy things out into a more permanent
 * place.
 */
#define PARAM			ZERO_PGE
#define COMMAND_LINE		((char*)(PARAM + 0x0000))
#define INITRD_START		(*(unsigned long *) (PARAM+0x100))
#define INITRD_SIZE		(*(unsigned long *) (PARAM+0x108))

#ifndef __ASSEMBLY__

#include <linux/kernel.h>

/*
 * This is the logout header that should be common to all platforms
 * (assuming they are running OSF/1 PALcode, I guess).
 */
struct el_common {
	unsigned int	size;		/* size in bytes of logout area */
	unsigned int	sbz1	: 30;	/* should be zero */
	unsigned int	err2	:  1;	/* second error */
	unsigned int	retry	:  1;	/* retry flag */
	unsigned int	proc_offset;	/* processor-specific offset */
	unsigned int	sys_offset;	/* system-specific offset */
	unsigned int	code;		/* machine check code */
	unsigned int	frame_rev;	/* frame revision */
};
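
/*
 * Illustrative sketch (not part of this header): a machine-check
 * handler typically locates the processor- and system-specific parts
 * of a logout frame via the offsets above.  "la_ptr" is a hypothetical
 * pointer to the start of the logout area.
 *
 *	struct el_common *hdr = (struct el_common *) la_ptr;
 *	void *proc_area = (char *) la_ptr + hdr->proc_offset;
 *	void *sys_area  = (char *) la_ptr + hdr->sys_offset;
 */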

/* Machine Check Frame for uncorrectable errors (Large format)
 *	--- This is used to log uncorrectable errors such as
 *	    double bit ECC errors.
 *	--- These errors are detected by both processor and systems.
 */
struct el_common_EV5_uncorrectable_mcheck {
	unsigned long	shadow[8];	/* Shadow reg. 8-14, 25 */
	unsigned long	paltemp[24];	/* PAL TEMP REGS. */
	unsigned long	exc_addr;	/* Address of excepting instruction */
	unsigned long	exc_sum;	/* Summary of arithmetic traps. */
	unsigned long	exc_mask;	/* Exception mask (from exc_sum). */
	unsigned long	pal_base;	/* Base address for PALcode. */
	unsigned long	isr;		/* Interrupt Status Reg. */
	unsigned long	icsr;		/* CURRENT SETUP OF EV5 IBOX */
	unsigned long	ic_perr_stat;	/* I-CACHE Reg. <11> set Data parity
					   <12> set TAG parity */
	unsigned long	dc_perr_stat;	/* D-CACHE error Reg. Bits set to 1:
					   <2> Data error in bank 0
					   <3> Data error in bank 1
					   <4> Tag error in bank 0
					   <5> Tag error in bank 1 */
	unsigned long	va;		/* Effective VA of fault or miss. */
	unsigned long	mm_stat;	/* Holds the reason for D-stream
					   fault or D-cache parity errors */
	unsigned long	sc_addr;	/* Address that was being accessed
					   when EV5 detected Secondary cache
					   failure. */
	unsigned long	sc_stat;	/* Helps determine if the error was
					   TAG/Data parity (Secondary Cache) */
	unsigned long	bc_tag_addr;	/* Contents of EV5 BC_TAG_ADDR */
	unsigned long	ei_addr;	/* Physical address of any transfer
					   that is logged in EV5 EI_STAT */
	unsigned long	fill_syndrome;	/* For correcting ECC errors. */
	unsigned long	ei_stat;	/* Helps identify reason of any
					   processor uncorrectable error
					   at its external interface. */
	unsigned long	ld_lock;	/* Contents of EV5 LD_LOCK register */
};

struct el_common_EV6_mcheck {
	unsigned int FrameSize;		/* Bytes, including this field */
	unsigned int FrameFlags;	/* <31> = Retry, <30> = Second Error */
	unsigned int CpuOffset;		/* Offset to CPU-specific info */
	unsigned int SystemOffset;	/* Offset to system-specific info */
	unsigned int MCHK_Code;
	unsigned int MCHK_Frame_Rev;
	unsigned long I_STAT;		/* EV6 Internal Processor Registers */
	unsigned long DC_STAT;		/* (See the 21264 Spec) */
	unsigned long C_ADDR;
	unsigned long DC1_SYNDROME;
	unsigned long DC0_SYNDROME;
	unsigned long C_STAT;
	unsigned long C_STS;
	unsigned long MM_STAT;
	unsigned long EXC_ADDR;
	unsigned long IER_CM;
	unsigned long ISUM;
	unsigned long RESERVED0;
	unsigned long PAL_BASE;
	unsigned long I_CTL;
	unsigned long PCTX;
};

extern void halt(void) __attribute__((noreturn));
#define __halt() __asm__ __volatile__ ("call_pal %0 #halt" : : "i" (PAL_halt))

#define switch_to(P,N,L)						 \
  do {									 \
    (L) = alpha_switch_to(virt_to_phys(&task_thread_info(N)->pcb), (P)); \
    check_mmu_context();						 \
  } while (0)

struct task_struct;
extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct *);

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

#define imb() \
__asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory")

#define draina() \
__asm__ __volatile__ ("call_pal %0 #draina" : : "i" (PAL_draina) : "memory")
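
/*
 * Illustrative sketch (not part of this header): imb() must run after
 * new instructions are written and before they are executed, e.g. in a
 * hypothetical code-patching path ("code_buf", "new_insns" and "len"
 * are hypothetical locals):
 *
 *	memcpy(code_buf, new_insns, len);
 *	imb();			// flush the I-stream before jumping in
 *	((void (*)(void)) code_buf)();
 */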

enum implver_enum {
	IMPLVER_EV4,
	IMPLVER_EV5,
	IMPLVER_EV6
};

#ifdef CONFIG_ALPHA_GENERIC
#define implver()				\
({ unsigned long __implver;			\
   __asm__ ("implver %0" : "=r"(__implver));	\
   (enum implver_enum) __implver; })
#else
/* Try to eliminate some dead code.  */
#ifdef CONFIG_ALPHA_EV4
#define implver() IMPLVER_EV4
#endif
#ifdef CONFIG_ALPHA_EV5
#define implver() IMPLVER_EV5
#endif
#if defined(CONFIG_ALPHA_EV6)
#define implver() IMPLVER_EV6
#endif
#endif

enum amask_enum {
	AMASK_BWX = (1UL << 0),
	AMASK_FIX = (1UL << 1),
	AMASK_CIX = (1UL << 2),
	AMASK_MAX = (1UL << 8),
	AMASK_PRECISE_TRAP = (1UL << 9),
};

#define amask(mask)						\
({ unsigned long __amask, __input = (mask);			\
   __asm__ ("amask %1,%0" : "=r"(__amask) : "rI"(__input));	\
   __amask; })
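
/*
 * Illustrative sketch (not part of this header): amask() returns the
 * bits of its input that name *unsupported* features, so a clear bit
 * means the extension is present:
 *
 *	if (amask(AMASK_BWX) == 0)
 *		printk("BWX byte-word extension available\n");
 *	if (implver() >= IMPLVER_EV6)
 *		printk("EV6 or newer core\n");
 */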

#define __CALL_PAL_R0(NAME, TYPE)				\
static inline TYPE NAME(void)					\
{								\
	register TYPE __r0 __asm__("$0");			\
	__asm__ __volatile__(					\
		"call_pal %1 # " #NAME				\
		:"=r" (__r0)					\
		:"i" (PAL_ ## NAME)				\
		:"$1", "$16", "$22", "$23", "$24", "$25");	\
	return __r0;						\
}

#define __CALL_PAL_W1(NAME, TYPE0)				\
static inline void NAME(TYPE0 arg0)				\
{								\
	register TYPE0 __r16 __asm__("$16") = arg0;		\
	__asm__ __volatile__(					\
		"call_pal %1 # "#NAME				\
		: "=r"(__r16)					\
		: "i"(PAL_ ## NAME), "0"(__r16)			\
		: "$1", "$22", "$23", "$24", "$25");		\
}

#define __CALL_PAL_W2(NAME, TYPE0, TYPE1)			\
static inline void NAME(TYPE0 arg0, TYPE1 arg1)			\
{								\
	register TYPE0 __r16 __asm__("$16") = arg0;		\
	register TYPE1 __r17 __asm__("$17") = arg1;		\
	__asm__ __volatile__(					\
		"call_pal %2 # "#NAME				\
		: "=r"(__r16), "=r"(__r17)			\
		: "i"(PAL_ ## NAME), "0"(__r16), "1"(__r17)	\
		: "$1", "$22", "$23", "$24", "$25");		\
}

#define __CALL_PAL_RW1(NAME, RTYPE, TYPE0)			\
static inline RTYPE NAME(TYPE0 arg0)				\
{								\
	register RTYPE __r0 __asm__("$0");			\
	register TYPE0 __r16 __asm__("$16") = arg0;		\
	__asm__ __volatile__(					\
		"call_pal %2 # "#NAME				\
		: "=r"(__r16), "=r"(__r0)			\
		: "i"(PAL_ ## NAME), "0"(__r16)			\
		: "$1", "$22", "$23", "$24", "$25");		\
	return __r0;						\
}

#define __CALL_PAL_RW2(NAME, RTYPE, TYPE0, TYPE1)		\
static inline RTYPE NAME(TYPE0 arg0, TYPE1 arg1)		\
{								\
	register RTYPE __r0 __asm__("$0");			\
	register TYPE0 __r16 __asm__("$16") = arg0;		\
	register TYPE1 __r17 __asm__("$17") = arg1;		\
	__asm__ __volatile__(					\
		"call_pal %3 # "#NAME				\
		: "=r"(__r16), "=r"(__r17), "=r"(__r0)		\
		: "i"(PAL_ ## NAME), "0"(__r16), "1"(__r17)	\
		: "$1", "$22", "$23", "$24", "$25");		\
	return __r0;						\
}

__CALL_PAL_W1(cflush, unsigned long);
__CALL_PAL_R0(rdmces, unsigned long);
__CALL_PAL_R0(rdps, unsigned long);
__CALL_PAL_R0(rdusp, unsigned long);
__CALL_PAL_RW1(swpipl, unsigned long, unsigned long);
__CALL_PAL_R0(whami, unsigned long);
__CALL_PAL_W2(wrent, void*, unsigned long);
__CALL_PAL_W1(wripir, unsigned long);
__CALL_PAL_W1(wrkgp, unsigned long);
__CALL_PAL_W1(wrmces, unsigned long);
__CALL_PAL_RW2(wrperfmon, unsigned long, unsigned long, unsigned long);
__CALL_PAL_W1(wrusp, unsigned long);
__CALL_PAL_W1(wrvptptr, unsigned long);
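
/*
 * Illustrative sketch (not part of this header): the wrappers above
 * expand to ordinary inline functions, e.g.
 *
 *	unsigned long cpu = whami();		// current CPU number
 *	unsigned long ps  = rdps();		// processor status
 *	unsigned long old_ipl = swpipl(7);	// raise IPL, get old level
 */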

#define IPL_MIN		0
#define IPL_POWERFAIL	6
#define IPL_MAX		7

#ifdef CONFIG_ALPHA_BROKEN_IRQ_MASK
#undef IPL_MIN
#define IPL_MIN		__min_ipl
extern int __min_ipl;
#endif

#define getipl()		(rdps() & 7)
#define setipl(ipl)		((void) swpipl(ipl))

#define local_irq_disable()		do { setipl(IPL_MAX); barrier(); } while(0)
#define local_irq_enable()		do { barrier(); setipl(IPL_MIN); } while(0)
#define local_save_flags(flags)		((flags) = rdps())
#define local_irq_save(flags)		do { (flags) = swpipl(IPL_MAX); barrier(); } while(0)
#define local_irq_restore(flags)	do { barrier(); setipl(flags); barrier(); } while(0)

#define irqs_disabled()			(getipl() == IPL_MAX)
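
/*
 * Illustrative sketch (not part of this header): protecting a short
 * critical section against local interrupts ("flags" is a hypothetical
 * local):
 *
 *	unsigned long flags;
 *	local_irq_save(flags);
 *	... touch data shared with an interrupt handler ...
 *	local_irq_restore(flags);
 */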

/*
 * TB routines..
 */
#define __tbi(nr,arg,arg1...)					\
({								\
	register unsigned long __r16 __asm__("$16") = (nr);	\
	register unsigned long __r17 __asm__("$17"); arg;	\
	__asm__ __volatile__(					\
		"call_pal %3 #__tbi"				\
		:"=r" (__r16),"=r" (__r17)			\
		:"0" (__r16),"i" (PAL_tbi) ,##arg1		\
		:"$0", "$1", "$22", "$23", "$24", "$25");	\
})

#define tbi(x,y)	__tbi(x,__r17=(y),"1" (__r17))
#define tbisi(x)	__tbi(1,__r17=(x),"1" (__r17))
#define tbisd(x)	__tbi(2,__r17=(x),"1" (__r17))
#define tbis(x)		__tbi(3,__r17=(x),"1" (__r17))
#define tbiap()		__tbi(-1, /* no second argument */)
#define tbia()		__tbi(-2, /* no second argument */)
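
/*
 * Illustrative sketch (not part of this header): invalidating the TLB
 * entry for a single address after a PTE change, vs. flushing
 * everything ("addr" is a hypothetical local):
 *
 *	tbis(addr);	// drop I- and D-stream translations for addr
 *	tbia();		// invalidate the whole TLB
 */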

/*
 * Atomic exchange.
 * Since it can be used to implement critical sections
 * it must clobber "memory" (also for interrupts in UP).
 */

static inline unsigned long
__xchg_u8(volatile char *m, unsigned long val)
{
	unsigned long ret, tmp, addr64;

	__asm__ __volatile__(
	"	andnot	%4,7,%3\n"
	"	insbl	%1,%4,%1\n"
	"1:	ldq_l	%2,0(%3)\n"
	"	extbl	%2,%4,%0\n"
	"	mskbl	%2,%4,%2\n"
	"	or	%1,%2,%2\n"
	"	stq_c	%2,0(%3)\n"
	"	beq	%2,2f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"2:	br	1b\n"
	".previous"
	: "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
	: "r" ((long)m), "1" (val) : "memory");

	return ret;
}

static inline unsigned long
__xchg_u16(volatile short *m, unsigned long val)
{
	unsigned long ret, tmp, addr64;

	__asm__ __volatile__(
	"	andnot	%4,7,%3\n"
	"	inswl	%1,%4,%1\n"
	"1:	ldq_l	%2,0(%3)\n"
	"	extwl	%2,%4,%0\n"
	"	mskwl	%2,%4,%2\n"
	"	or	%1,%2,%2\n"
	"	stq_c	%2,0(%3)\n"
	"	beq	%2,2f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"2:	br	1b\n"
	".previous"
	: "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
	: "r" ((long)m), "1" (val) : "memory");

	return ret;
}

static inline unsigned long
__xchg_u32(volatile int *m, unsigned long val)
{
	unsigned long dummy;

	__asm__ __volatile__(
	"1:	ldl_l %0,%4\n"
	"	bis $31,%3,%1\n"
	"	stl_c %1,%2\n"
	"	beq %1,2f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	: "=&r" (val), "=&r" (dummy), "=m" (*m)
	: "rI" (val), "m" (*m) : "memory");

	return val;
}

static inline unsigned long
__xchg_u64(volatile long *m, unsigned long val)
{
	unsigned long dummy;

	__asm__ __volatile__(
	"1:	ldq_l %0,%4\n"
	"	bis $31,%3,%1\n"
	"	stq_c %1,%2\n"
	"	beq %1,2f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	: "=&r" (val), "=&r" (dummy), "=m" (*m)
	: "rI" (val), "m" (*m) : "memory");

	return val;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid xchg().  */
extern void __xchg_called_with_bad_pointer(void);

#define __xchg(ptr, x, size) \
({ \
	unsigned long __xchg__res; \
	volatile void *__xchg__ptr = (ptr); \
	switch (size) { \
		case 1: __xchg__res = __xchg_u8(__xchg__ptr, x); break; \
		case 2: __xchg__res = __xchg_u16(__xchg__ptr, x); break; \
		case 4: __xchg__res = __xchg_u32(__xchg__ptr, x); break; \
		case 8: __xchg__res = __xchg_u64(__xchg__ptr, x); break; \
		default: __xchg_called_with_bad_pointer(); __xchg__res = x; \
	} \
	__xchg__res; \
})

#define xchg(ptr,x)							     \
  ({									     \
     __typeof__(*(ptr)) _x_ = (x);					     \
     (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
  })
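
/*
 * Illustrative sketch (not part of this header): xchg() atomically
 * stores a new value and returns the old one, e.g. a simple
 * test-and-set on a hypothetical lock word:
 *
 *	static unsigned long lock_word;
 *	...
 *	while (xchg(&lock_word, 1) != 0)
 *		cpu_relax();		// spin until we observed 0
 */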

static inline unsigned long
__xchg_u8_local(volatile char *m, unsigned long val)
{
	unsigned long ret, tmp, addr64;

	__asm__ __volatile__(
	"	andnot	%4,7,%3\n"
	"	insbl	%1,%4,%1\n"
	"1:	ldq_l	%2,0(%3)\n"
	"	extbl	%2,%4,%0\n"
	"	mskbl	%2,%4,%2\n"
	"	or	%1,%2,%2\n"
	"	stq_c	%2,0(%3)\n"
	"	beq	%2,2f\n"
	".subsection 2\n"
	"2:	br	1b\n"
	".previous"
	: "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
	: "r" ((long)m), "1" (val) : "memory");

	return ret;
}

static inline unsigned long
__xchg_u16_local(volatile short *m, unsigned long val)
{
	unsigned long ret, tmp, addr64;

	__asm__ __volatile__(
	"	andnot	%4,7,%3\n"
	"	inswl	%1,%4,%1\n"
	"1:	ldq_l	%2,0(%3)\n"
	"	extwl	%2,%4,%0\n"
	"	mskwl	%2,%4,%2\n"
	"	or	%1,%2,%2\n"
	"	stq_c	%2,0(%3)\n"
	"	beq	%2,2f\n"
	".subsection 2\n"
	"2:	br	1b\n"
	".previous"
	: "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
	: "r" ((long)m), "1" (val) : "memory");

	return ret;
}

static inline unsigned long
__xchg_u32_local(volatile int *m, unsigned long val)
{
	unsigned long dummy;

	__asm__ __volatile__(
	"1:	ldl_l %0,%4\n"
	"	bis $31,%3,%1\n"
	"	stl_c %1,%2\n"
	"	beq %1,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	: "=&r" (val), "=&r" (dummy), "=m" (*m)
	: "rI" (val), "m" (*m) : "memory");

	return val;
}

static inline unsigned long
__xchg_u64_local(volatile long *m, unsigned long val)
{
	unsigned long dummy;

	__asm__ __volatile__(
	"1:	ldq_l %0,%4\n"
	"	bis $31,%3,%1\n"
	"	stq_c %1,%2\n"
	"	beq %1,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	: "=&r" (val), "=&r" (dummy), "=m" (*m)
	: "rI" (val), "m" (*m) : "memory");

	return val;
}

#define __xchg_local(ptr, x, size) \
({ \
	unsigned long __xchg__res; \
	volatile void *__xchg__ptr = (ptr); \
	switch (size) { \
		case 1: __xchg__res = __xchg_u8_local(__xchg__ptr, x); break; \
		case 2: __xchg__res = __xchg_u16_local(__xchg__ptr, x); break; \
		case 4: __xchg__res = __xchg_u32_local(__xchg__ptr, x); break; \
		case 8: __xchg__res = __xchg_u64_local(__xchg__ptr, x); break; \
		default: __xchg_called_with_bad_pointer(); __xchg__res = x; \
	} \
	__xchg__res; \
})

#define xchg_local(ptr,x)						     \
  ({									     \
     __typeof__(*(ptr)) _x_ = (x);					     \
     (__typeof__(*(ptr))) __xchg_local((ptr), (unsigned long)_x_,	     \
				       sizeof(*(ptr)));			     \
  })
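
/*
 * Illustrative note (not part of this header): the _local variants
 * omit the SMP memory barrier, so they are only safe for data that is
 * strictly per-CPU, e.g. a hypothetical per-CPU statistics slot:
 *
 *	unsigned long old = xchg_local(&__get_cpu_var(stat_slot), 0);
 */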

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 *
 * The memory barrier should be placed in SMP only when we actually
 * make the change.  If we don't change anything (so if the returned
 * prev is equal to old) then we aren't acquiring anything new and
 * we don't need any memory barrier as far as I can tell.
 */

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long
__cmpxchg_u8(volatile char *m, long old, long new)
{
	unsigned long prev, tmp, cmp, addr64;

	__asm__ __volatile__(
	"	andnot	%5,7,%4\n"
	"	insbl	%1,%5,%1\n"
	"1:	ldq_l	%2,0(%4)\n"
	"	extbl	%2,%5,%0\n"
	"	cmpeq	%0,%6,%3\n"
	"	beq	%3,2f\n"
	"	mskbl	%2,%5,%2\n"
	"	or	%1,%2,%2\n"
	"	stq_c	%2,0(%4)\n"
	"	beq	%2,3f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
	: "r" ((long)m), "Ir" (old), "1" (new) : "memory");

	return prev;
}

static inline unsigned long
__cmpxchg_u16(volatile short *m, long old, long new)
{
	unsigned long prev, tmp, cmp, addr64;

	__asm__ __volatile__(
	"	andnot	%5,7,%4\n"
	"	inswl	%1,%5,%1\n"
	"1:	ldq_l	%2,0(%4)\n"
	"	extwl	%2,%5,%0\n"
	"	cmpeq	%0,%6,%3\n"
	"	beq	%3,2f\n"
	"	mskwl	%2,%5,%2\n"
	"	or	%1,%2,%2\n"
	"	stq_c	%2,0(%4)\n"
	"	beq	%2,3f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
	: "r" ((long)m), "Ir" (old), "1" (new) : "memory");

	return prev;
}

static inline unsigned long
__cmpxchg_u32(volatile int *m, int old, int new)
{
	unsigned long prev, cmp;

	__asm__ __volatile__(
	"1:	ldl_l %0,%5\n"
	"	cmpeq %0,%3,%1\n"
	"	beq %1,2f\n"
	"	mov %4,%1\n"
	"	stl_c %1,%2\n"
	"	beq %1,3f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"2:\n"
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	: "=&r"(prev), "=&r"(cmp), "=m"(*m)
	: "r"((long) old), "r"(new), "m"(*m) : "memory");

	return prev;
}

static inline unsigned long
__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
{
	unsigned long prev, cmp;

	__asm__ __volatile__(
	"1:	ldq_l %0,%5\n"
	"	cmpeq %0,%3,%1\n"
	"	beq %1,2f\n"
	"	mov %4,%1\n"
	"	stq_c %1,%2\n"
	"	beq %1,3f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"2:\n"
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	: "=&r"(prev), "=&r"(cmp), "=m"(*m)
	: "r"((long) old), "r"(new), "m"(*m) : "memory");

	return prev;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static __always_inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
		case 1:
			return __cmpxchg_u8(ptr, old, new);
		case 2:
			return __cmpxchg_u16(ptr, old, new);
		case 4:
			return __cmpxchg_u32(ptr, old, new);
		case 8:
			return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })
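
/*
 * Illustrative sketch (not part of this header): the canonical
 * cmpxchg() retry loop, here adding to a hypothetical counter without
 * a lock:
 *
 *	static unsigned long counter;
 *	...
 *	unsigned long old, new;
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 */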

static inline unsigned long
__cmpxchg_u8_local(volatile char *m, long old, long new)
{
	unsigned long prev, tmp, cmp, addr64;

	__asm__ __volatile__(
	"	andnot	%5,7,%4\n"
	"	insbl	%1,%5,%1\n"
	"1:	ldq_l	%2,0(%4)\n"
	"	extbl	%2,%5,%0\n"
	"	cmpeq	%0,%6,%3\n"
	"	beq	%3,2f\n"
	"	mskbl	%2,%5,%2\n"
	"	or	%1,%2,%2\n"
	"	stq_c	%2,0(%4)\n"
	"	beq	%2,3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
	: "r" ((long)m), "Ir" (old), "1" (new) : "memory");

	return prev;
}

static inline unsigned long
__cmpxchg_u16_local(volatile short *m, long old, long new)
{
	unsigned long prev, tmp, cmp, addr64;

	__asm__ __volatile__(
	"	andnot	%5,7,%4\n"
	"	inswl	%1,%5,%1\n"
	"1:	ldq_l	%2,0(%4)\n"
	"	extwl	%2,%5,%0\n"
	"	cmpeq	%0,%6,%3\n"
	"	beq	%3,2f\n"
	"	mskwl	%2,%5,%2\n"
	"	or	%1,%2,%2\n"
	"	stq_c	%2,0(%4)\n"
	"	beq	%2,3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
	: "r" ((long)m), "Ir" (old), "1" (new) : "memory");

	return prev;
}

static inline unsigned long
__cmpxchg_u32_local(volatile int *m, int old, int new)
{
	unsigned long prev, cmp;

	__asm__ __volatile__(
	"1:	ldl_l %0,%5\n"
	"	cmpeq %0,%3,%1\n"
	"	beq %1,2f\n"
	"	mov %4,%1\n"
	"	stl_c %1,%2\n"
	"	beq %1,3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	: "=&r"(prev), "=&r"(cmp), "=m"(*m)
	: "r"((long) old), "r"(new), "m"(*m) : "memory");

	return prev;
}

static inline unsigned long
__cmpxchg_u64_local(volatile long *m, unsigned long old, unsigned long new)
{
	unsigned long prev, cmp;

	__asm__ __volatile__(
	"1:	ldq_l %0,%5\n"
	"	cmpeq %0,%3,%1\n"
	"	beq %1,2f\n"
	"	mov %4,%1\n"
	"	stq_c %1,%2\n"
	"	beq %1,3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	: "=&r"(prev), "=&r"(cmp), "=m"(*m)
	: "r"((long) old), "r"(new), "m"(*m) : "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
		int size)
{
	switch (size) {
		case 1:
			return __cmpxchg_u8_local(ptr, old, new);
		case 2:
			return __cmpxchg_u16_local(ptr, old, new);
		case 4:
			return __cmpxchg_u32_local(ptr, old, new);
		case 8:
			return __cmpxchg_u64_local(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg_local(ptr,o,n)						       \
  ({									       \
     __typeof__(*(ptr)) _o_ = (o);					       \
     __typeof__(*(ptr)) _n_ = (n);					       \
     (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_,	       \
					  (unsigned long)_n_, sizeof(*(ptr))); \
  })
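
/*
 * Illustrative note (not part of this header): like xchg_local(),
 * cmpxchg_local() skips the SMP memory barrier, so it is only suitable
 * for strictly per-CPU data, e.g. a hypothetical per-CPU sequence
 * counter:
 *
 *	unsigned long seq = __get_cpu_var(local_seq);
 *	cmpxchg_local(&__get_cpu_var(local_seq), seq, seq + 1);
 */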

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* __ALPHA_SYSTEM_H */