#ifndef __ALPHA_SYSTEM_H
#define __ALPHA_SYSTEM_H

#include <linux/config.h>
#include <asm/pal.h>
#include <asm/page.h>
/*
 * System defines.. Note that this is included both from .c and .S
 * files, so it only contains defines, not any C code.
 *
 * We leave one page for the initial stack page, and one page for
 * the initial process structure. Also, the console eats 3 MB for
 * the initial bootloader (one of which we can reclaim later).
 */
#define BOOT_PCB	0x20000000
#define BOOT_ADDR	0x20000000
/* Remove when official MILO sources have ELF support: */
#define BOOT_SIZE	(16*1024)
#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
#define KERNEL_START_PHYS	0x300000 /* Old bootloaders hardcoded this.  */
#else
#define KERNEL_START_PHYS	0x800000 /* Wildfire has a huge console.  */
#endif

#define KERNEL_START	(PAGE_OFFSET+KERNEL_START_PHYS)
#define SWAPPER_PGD	KERNEL_START
#define INIT_STACK	(PAGE_OFFSET+KERNEL_START_PHYS+0x02000)
#define EMPTY_PGT	(PAGE_OFFSET+KERNEL_START_PHYS+0x04000)
#define EMPTY_PGE	(PAGE_OFFSET+KERNEL_START_PHYS+0x08000)
#define ZERO_PGE	(PAGE_OFFSET+KERNEL_START_PHYS+0x0A000)

#define START_ADDR	(PAGE_OFFSET+KERNEL_START_PHYS+0x10000)
#ifndef __ASSEMBLY__

#include <linux/kernel.h>
/*
 * This is the logout header that should be common to all platforms
 * (assuming they are running OSF/1 PALcode, I guess).
 */
struct el_common {
	unsigned int	size;		/* size in bytes of logout area */
	int		sbz1	: 30;	/* should be zero */
	int		err2	:  1;	/* second error */
	int		retry	:  1;	/* retry flag */
	unsigned int	proc_offset;	/* processor-specific offset */
	unsigned int	sys_offset;	/* system-specific offset */
	unsigned long	code;		/* machine check code */
};
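
/*
 * Illustrative sketch (not part of the original header): the CPU- and
 * system-specific machine-check frames live at the byte offsets recorded
 * in this logout header, so a handler would typically locate them as
 * below.  "logout_area" is a hypothetical pointer to the logout area
 * handed over by the PALcode.
 *
 *	struct el_common *mchk = logout_area;
 *	void *cpu_frame = (char *) mchk + mchk->proc_offset;
 *	void *sys_frame = (char *) mchk + mchk->sys_offset;
 */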
/*
 * Machine Check Frame for uncorrectable errors (large format).
 *  --- This is used to log uncorrectable errors such as
 *      double-bit ECC errors.
 *  --- These errors are detected by both the processor and the system.
 */
struct el_common_EV5_uncorrectable_mcheck {
	unsigned long	shadow[8];	/* Shadow reg. 8-14, 25		*/
	unsigned long	paltemp[24];	/* PAL TEMP REGS.		*/
	unsigned long	exc_addr;	/* Address of excepting instruction */
	unsigned long	exc_sum;	/* Summary of arithmetic traps.	*/
	unsigned long	exc_mask;	/* Exception mask (from exc_sum). */
	unsigned long	pal_base;	/* Base address for PALcode.	*/
	unsigned long	isr;		/* Interrupt Status Reg.	*/
	unsigned long	icsr;		/* Current setup of EV5 Ibox	*/
	unsigned long	ic_perr_stat;	/* I-cache parity error status:
					   <11> set: data parity,
					   <12> set: tag parity		*/
	unsigned long	dc_perr_stat;	/* D-cache error status; bits set to 1:
					   <2> data error in bank 0,
					   <3> data error in bank 1,
					   <4> tag error in bank 0,
					   <5> tag error in bank 1	*/
	unsigned long	va;		/* Effective VA of fault or miss. */
	unsigned long	mm_stat;	/* Reason for the D-stream fault or
					   D-cache parity error		*/
	unsigned long	sc_addr;	/* Address being accessed when EV5
					   detected the secondary-cache
					   failure			*/
	unsigned long	sc_stat;	/* Helps determine whether the error
					   was tag or data parity
					   (secondary cache)		*/
	unsigned long	bc_tag_addr;	/* Contents of EV5 BC_TAG_ADDR	*/
	unsigned long	ei_addr;	/* Physical address of any transfer
					   logged in EV5 EI_STAT	*/
	unsigned long	fill_syndrome;	/* For correcting ECC errors.	*/
	unsigned long	ei_stat;	/* Helps identify the reason for any
					   processor uncorrectable error
					   at its external interface	*/
	unsigned long	ld_lock;	/* Contents of EV5 LD_LOCK register */
};
struct el_common_EV6_mcheck {
	unsigned int	FrameSize;	/* Bytes, including this field */
	unsigned int	FrameFlags;	/* <31> = Retry, <30> = Second Error */
	unsigned int	CpuOffset;	/* Offset to CPU-specific info */
	unsigned int	SystemOffset;	/* Offset to system-specific info */
	unsigned int	MCHK_Code;
	unsigned int	MCHK_Frame_Rev;
	unsigned long	I_STAT;		/* EV6 Internal Processor Registers */
	unsigned long	DC_STAT;	/* (See the 21264 Spec) */
	unsigned long	C_ADDR;
	unsigned long	DC1_SYNDROME;
	unsigned long	DC0_SYNDROME;
	unsigned long	C_STAT;
	unsigned long	C_STS;
	unsigned long	RESERVED0;
	unsigned long	EXC_ADDR;
	unsigned long	IER_CM;
	unsigned long	ISUM;
	unsigned long	MM_STAT;
	unsigned long	PAL_BASE;
	unsigned long	I_CTL;
	unsigned long	PCTX;
};
extern void halt(void) __attribute__((noreturn));
#define __halt() __asm__ __volatile__ ("call_pal %0 #halt" : : "i" (PAL_halt))
#define prepare_to_switch()	do { } while (0)
#define switch_to(prev,next,last)			\
do {							\
	unsigned long pcbb;				\
	current = (next);				\
	pcbb = virt_to_phys(&current->thread);		\
	(last) = alpha_switch_to(pcbb, (prev));		\
	check_mmu_context();				\
} while (0)

extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct *);
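
/*
 * Illustrative sketch (not part of the original header): the scheduler
 * invokes the macro roughly as below; "last" receives the task we really
 * switched away from, as returned by alpha_switch_to().
 *
 *	struct task_struct *prev = current, *last;
 *	switch_to(prev, next, last);
 */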
#define mb() \
__asm__ __volatile__("mb": : :"memory")

#define rmb() \
__asm__ __volatile__("mb": : :"memory")

#define wmb() \
__asm__ __volatile__("wmb": : :"memory")

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#endif
#define set_mb(var, value) \
do { var = value; mb(); } while (0)

#define set_wmb(var, value) \
do { var = value; wmb(); } while (0)
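
/*
 * Illustrative sketch (not part of the original header): the classic
 * producer/consumer pairing of wmb() and rmb().  "data" and "flag" are
 * hypothetical shared variables.
 *
 *	producer:
 *		data = value;
 *		wmb();			make data visible before the flag
 *		flag = 1;
 *
 *	consumer:
 *		while (!flag)
 *			barrier();
 *		rmb();			read the flag before reading the data
 *		result = data;
 */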
#define imb() \
__asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory")

#define draina() \
__asm__ __volatile__ ("call_pal %0 #draina" : : "i" (PAL_draina) : "memory")
enum implver_enum {
	IMPLVER_EV4,
	IMPLVER_EV5,
	IMPLVER_EV6
};
#ifdef CONFIG_ALPHA_GENERIC
#define implver()				\
({ unsigned long __implver;			\
   __asm__ ("implver %0" : "=r"(__implver));	\
   (enum implver_enum) __implver; })
#else
/* Try to eliminate some dead code.  */
#ifdef CONFIG_ALPHA_EV4
#define implver() IMPLVER_EV4
#endif
#ifdef CONFIG_ALPHA_EV5
#define implver() IMPLVER_EV5
#endif
#ifdef CONFIG_ALPHA_EV6
#define implver() IMPLVER_EV6
#endif
#endif
enum amask_enum {
	AMASK_BWX = (1UL << 0),
	AMASK_FIX = (1UL << 1),
	AMASK_MAX = (1UL << 8),
	AMASK_PRECISE_TRAP = (1UL << 9),
};
#define amask(mask)						\
({ unsigned long __amask, __input = (mask);			\
   __asm__ ("amask %1,%0" : "=r"(__amask) : "rI"(__input));	\
   __amask; })
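
/*
 * Illustrative sketch (not part of the original header): amask clears
 * the bits of the features the CPU implements, so a zero result means
 * every requested feature is present.
 *
 *	if (amask(AMASK_BWX) == 0) {
 *		... byte/word extension available (EV56 and later) ...
 *	}
 *	if (implver() == IMPLVER_EV6) {
 *		... 21264-specific setup ...
 *	}
 */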
#define __CALL_PAL_R0(NAME, TYPE)				\
static inline TYPE NAME(void)					\
{								\
	register TYPE __r0 __asm__("$0");			\
	__asm__ __volatile__(					\
		"call_pal %1 # " #NAME				\
		:"=r" (__r0)					\
		:"i" (PAL_ ## NAME)				\
		:"$1", "$16", "$22", "$23", "$24", "$25");	\
	return __r0;						\
}
#define __CALL_PAL_W1(NAME, TYPE0)				\
static inline void NAME(TYPE0 arg0)				\
{								\
	register TYPE0 __r16 __asm__("$16") = arg0;		\
	__asm__ __volatile__(					\
		"call_pal %1 # "#NAME				\
		: "=r"(__r16)					\
		: "i"(PAL_ ## NAME), "0"(__r16)			\
		: "$1", "$22", "$23", "$24", "$25");		\
}
#define __CALL_PAL_W2(NAME, TYPE0, TYPE1)			\
static inline void NAME(TYPE0 arg0, TYPE1 arg1)			\
{								\
	register TYPE0 __r16 __asm__("$16") = arg0;		\
	register TYPE1 __r17 __asm__("$17") = arg1;		\
	__asm__ __volatile__(					\
		"call_pal %2 # "#NAME				\
		: "=r"(__r16), "=r"(__r17)			\
		: "i"(PAL_ ## NAME), "0"(__r16), "1"(__r17)	\
		: "$1", "$22", "$23", "$24", "$25");		\
}
#define __CALL_PAL_RW1(NAME, RTYPE, TYPE0)			\
static inline RTYPE NAME(TYPE0 arg0)				\
{								\
	register RTYPE __r0 __asm__("$0");			\
	register TYPE0 __r16 __asm__("$16") = arg0;		\
	__asm__ __volatile__(					\
		"call_pal %2 # "#NAME				\
		: "=r"(__r16), "=r"(__r0)			\
		: "i"(PAL_ ## NAME), "0"(__r16)			\
		: "$1", "$22", "$23", "$24", "$25");		\
	return __r0;						\
}
#define __CALL_PAL_RW2(NAME, RTYPE, TYPE0, TYPE1)		\
static inline RTYPE NAME(TYPE0 arg0, TYPE1 arg1)		\
{								\
	register RTYPE __r0 __asm__("$0");			\
	register TYPE0 __r16 __asm__("$16") = arg0;		\
	register TYPE1 __r17 __asm__("$17") = arg1;		\
	__asm__ __volatile__(					\
		"call_pal %3 # "#NAME				\
		: "=r"(__r16), "=r"(__r17), "=r"(__r0)		\
		: "i"(PAL_ ## NAME), "0"(__r16), "1"(__r17)	\
		: "$1", "$22", "$23", "$24", "$25");		\
	return __r0;						\
}
__CALL_PAL_W1(cflush, unsigned long);
__CALL_PAL_R0(rdmces, unsigned long);
__CALL_PAL_R0(rdps, unsigned long);
__CALL_PAL_R0(rdusp, unsigned long);
__CALL_PAL_RW1(swpipl, unsigned long, unsigned long);
__CALL_PAL_R0(whami, unsigned long);
__CALL_PAL_W2(wrent, void*, unsigned long);
__CALL_PAL_W1(wripir, unsigned long);
__CALL_PAL_W1(wrkgp, unsigned long);
__CALL_PAL_W1(wrmces, unsigned long);
__CALL_PAL_RW2(wrperfmon, unsigned long, unsigned long, unsigned long);
__CALL_PAL_W1(wrusp, unsigned long);
__CALL_PAL_W1(wrvptptr, unsigned long);
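
/*
 * For reference, each line above expands to a small inline wrapper
 * around a CALL_PAL instruction; e.g. __CALL_PAL_R0(rdps, unsigned long)
 * yields roughly:
 *
 *	static inline unsigned long rdps(void)
 *	{
 *		register unsigned long __r0 __asm__("$0");
 *		__asm__ __volatile__("call_pal %1 # rdps"
 *			: "=r" (__r0) : "i" (PAL_rdps)
 *			: "$1", "$16", "$22", "$23", "$24", "$25");
 *		return __r0;
 *	}
 */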
#define IPL_MIN		0
#define IPL_SW0		1
#define IPL_SW1		2
#define IPL_DEV0	3
#define IPL_DEV1	4
#define IPL_TIMER	5
#define IPL_PERF	6
#define IPL_POWERFAIL	6
#define IPL_MCHECK	7
#define IPL_MAX		7

#ifdef CONFIG_ALPHA_BROKEN_IRQ_MASK
#undef IPL_MIN
#define IPL_MIN		__min_ipl
extern int __min_ipl;
#endif
#define getipl()		(rdps() & 7)
#define setipl(ipl)		((void) swpipl(ipl))

#define __cli()			do { setipl(IPL_MAX); barrier(); } while(0)
#define __sti()			do { barrier(); setipl(IPL_MIN); } while(0)
#define __save_flags(flags)	((flags) = rdps())
#define __save_and_cli(flags)	do { (flags) = swpipl(IPL_MAX); barrier(); } while(0)
#define __restore_flags(flags)	do { barrier(); setipl(flags); barrier(); } while(0)

#define local_irq_save(flags)		__save_and_cli(flags)
#define local_irq_restore(flags)	__restore_flags(flags)
#define local_irq_disable()		__cli()
#define local_irq_enable()		__sti()
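
/*
 * Illustrative sketch (not part of the original header): the usual
 * pattern for a short critical section that must not be interrupted on
 * this CPU.  "my_count" is a hypothetical shared variable.
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		raises the IPL to IPL_MAX
 *	my_count++;
 *	local_irq_restore(flags);	restores the saved IPL
 */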
#ifdef CONFIG_SMP

extern int global_irq_holder;

#define save_and_cli(flags)	(save_flags(flags), cli())

extern void __global_cli(void);
extern void __global_sti(void);
extern unsigned long __global_save_flags(void);
extern void __global_restore_flags(unsigned long flags);

#define cli()			__global_cli()
#define sti()			__global_sti()
#define save_flags(flags)	((flags) = __global_save_flags())
#define restore_flags(flags)	__global_restore_flags(flags)

#else /* CONFIG_SMP */

#define cli()			__cli()
#define sti()			__sti()
#define save_flags(flags)	__save_flags(flags)
#define save_and_cli(flags)	__save_and_cli(flags)
#define restore_flags(flags)	__restore_flags(flags)

#endif /* CONFIG_SMP */
/*
 * TB routines..
 */
#define __tbi(nr,arg,arg1...)					\
({								\
	register unsigned long __r16 __asm__("$16") = (nr);	\
	register unsigned long __r17 __asm__("$17"); arg;	\
	__asm__ __volatile__(					\
		"call_pal %3 #__tbi"				\
		:"=r" (__r16),"=r" (__r17)			\
		:"0" (__r16),"i" (PAL_tbi) ,##arg1		\
		:"$0", "$1", "$22", "$23", "$24", "$25");	\
})
#define tbi(x,y)	__tbi(x,__r17=(y),"1" (__r17))
#define tbisi(x)	__tbi(1,__r17=(x),"1" (__r17))
#define tbisd(x)	__tbi(2,__r17=(x),"1" (__r17))
#define tbis(x)		__tbi(3,__r17=(x),"1" (__r17))
#define tbiap()		__tbi(-1, /* no second argument */)
#define tbia()		__tbi(-2, /* no second argument */)
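
/*
 * Illustrative sketch (not part of the original header): after updating
 * a page-table entry, the stale translation for that address is flushed
 * with tbis(); tbia() drops the entire TLB.  "ptep", "new_pte" and
 * "addr" are hypothetical.
 *
 *	set_pte(ptep, new_pte);
 *	tbis(addr);		invalidate the I- and D-stream entry
 */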
/*
 * Atomic exchange.
 * Since it can be used to implement critical sections
 * it must clobber "memory" (also for interrupts in UP).
 */
extern __inline__ unsigned long
__xchg_u32(volatile int *m, unsigned long val)
{
	unsigned long dummy;

	/* ldl_l/stl_c sequence; a failed store-conditional retries via
	   the out-of-line branch in subsection 2.  */
	__asm__ __volatile__(
	"1:	ldl_l %0,%4\n"
	"	bis $31,%3,%1\n"
	"	stl_c %1,%2\n"
	"	beq %1,2f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	: "=&r" (val), "=&r" (dummy), "=m" (*m)
	: "rI" (val), "m" (*m) : "memory");

	return val;
}
extern __inline__ unsigned long
__xchg_u64(volatile long *m, unsigned long val)
{
	unsigned long dummy;

	__asm__ __volatile__(
	"1:	ldq_l %0,%4\n"
	"	bis $31,%3,%1\n"
	"	stq_c %1,%2\n"
	"	beq %1,2f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	: "=&r" (val), "=&r" (dummy), "=m" (*m)
	: "rI" (val), "m" (*m) : "memory");

	return val;
}
/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid xchg().  */
extern void __xchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__xchg(volatile void *ptr, unsigned long x, int size)
{
	switch (size) {
		case 4:
			return __xchg_u32(ptr, x);
		case 8:
			return __xchg_u64(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}
#define xchg(ptr,x)						\
({								\
	__typeof__(*(ptr)) _x_ = (x);				\
	(__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
})

#define tas(ptr) (xchg((ptr),1))
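
/*
 * Illustrative sketch (not part of the original header): a minimal
 * test-and-set spin loop built on tas().  "lock" is a hypothetical
 * variable; real code should use the spinlock API instead.
 *
 *	while (tas(&lock))		tas() returns the old value
 *		while (lock)		spin read-only until it looks free
 *			barrier();
 *	... critical section ...
 *	mb();				order the section before the release
 *	lock = 0;
 */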
/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 *
 * The memory barrier should be placed in SMP only when we actually
 * make the change.  If we don't change anything (so if the returned
 * prev is equal to old) then we aren't acquiring anything new and
 * we don't need any memory barrier as far as I can tell.
 */

#define __HAVE_ARCH_CMPXCHG 1
extern __inline__ unsigned long
__cmpxchg_u32(volatile int *m, int old, int new)
{
	unsigned long prev, cmp;

	__asm__ __volatile__(
	"1:	ldl_l %0,%5\n"
	"	cmpeq %0,%3,%1\n"
	"	beq %1,2f\n"
	"	mov %4,%1\n"
	"	stl_c %1,%2\n"
	"	beq %1,3f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"2:\n"
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	: "=&r"(prev), "=&r"(cmp), "=m"(*m)
	: "r"((long) old), "r"(new), "m"(*m) : "memory");

	return prev;
}
extern __inline__ unsigned long
__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
{
	unsigned long prev, cmp;

	__asm__ __volatile__(
	"1:	ldq_l %0,%5\n"
	"	cmpeq %0,%3,%1\n"
	"	beq %1,2f\n"
	"	mov %4,%1\n"
	"	stq_c %1,%2\n"
	"	beq %1,3f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"2:\n"
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	: "=&r"(prev), "=&r"(cmp), "=m"(*m)
	: "r"((long) old), "r"(new), "m"(*m) : "memory");

	return prev;
}
/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
		case 4:
			return __cmpxchg_u32(ptr, old, new);
		case 8:
			return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}
#define cmpxchg(ptr,o,n)						\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
				       (unsigned long)_n_, sizeof(*(ptr))); \
})
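
/*
 * Illustrative sketch (not part of the original header): a lock-free
 * increment built on cmpxchg(), retrying until no other CPU raced with
 * us.  "ctr" is a hypothetical shared counter.
 *
 *	static inline void ctr_inc(volatile int *ctr)
 *	{
 *		int old;
 *		do {
 *			old = *ctr;
 *		} while (cmpxchg(ctr, old, old + 1) != old);
 *	}
 */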
#endif /* __ASSEMBLY__ */

#endif /* __ALPHA_SYSTEM_H */