#ifndef _ASM_IA64_SYSTEM_H
#define _ASM_IA64_SYSTEM_H

/*
 * System defines.  Note that this is included both from .c and .S
 * files, so it does only defines, not any C code.  This is based
 * on information published in the Processor Abstraction Layer
 * and the System Abstraction Layer manual.
 *
 * Copyright (C) 1998-2000 Hewlett-Packard Co
 * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 */
#include <linux/config.h>

#include <asm/page.h>

#define KERNEL_START		(PAGE_OFFSET + 0x500000)

/*
 * The following #defines must match with vmlinux.lds.S:
 */
#define IVT_END_ADDR		(KERNEL_START + 0x8000)
#define ZERO_PAGE_ADDR		(IVT_END_ADDR + 0*PAGE_SIZE)
#define SWAPPER_PGD_ADDR	(IVT_END_ADDR + 1*PAGE_SIZE)

#define GATE_ADDR		(0xa000000000000000 + PAGE_SIZE)

#ifndef __ASSEMBLY__

#include <linux/types.h>

struct pci_vector_struct {
	__u16 bus;	/* PCI Bus number */
	__u32 pci_id;	/* ACPI split 16 bits device, 16 bits function (see section 6.1.1) */
	__u8 pin;	/* PCI PIN (0 = A, 1 = B, 2 = C, 3 = D) */
	__u8 irq;	/* IRQ assigned */
};
extern struct ia64_boot_param {
	__u64 command_line;		/* physical address of command line arguments */
	__u64 efi_systab;		/* physical address of EFI system table */
	__u64 efi_memmap;		/* physical address of EFI memory map */
	__u64 efi_memmap_size;		/* size of EFI memory map */
	__u64 efi_memdesc_size;		/* size of an EFI memory map descriptor */
	__u32 efi_memdesc_version;	/* memory descriptor version */
	struct {
		__u16 num_cols;	/* number of columns on console output device */
		__u16 num_rows;	/* number of rows on console output device */
		__u16 orig_x;	/* cursor's x position */
		__u16 orig_y;	/* cursor's y position */
	} console_info;
	__u16 num_pci_vectors;	/* number of ACPI derived PCI IRQs */
	__u64 pci_vectors;	/* physical address of PCI data (pci_vector_struct) */
	__u64 fpswa;		/* physical address of the fpswa interface */
	__u64 initrd_start;
	__u64 initrd_size;
} ia64_boot_param;
extern inline void
ia64_insn_group_barrier (void)
{
	__asm__ __volatile__ (";;" ::: "memory");
}
/*
 * Macros to force memory ordering.  In these descriptions, "previous"
 * and "subsequent" refer to program order; "visible" means that all
 * architecturally visible effects of a memory access have occurred
 * (at a minimum, this means the memory has been read or written).
 *
 *   wmb():	Guarantees that all preceding stores to memory-
 *		like regions are visible before any subsequent
 *		stores and that all following stores will be
 *		visible only after all previous stores.
 *   rmb():	Like wmb(), but for reads.
 *   mb():	wmb()/rmb() combo, i.e., all previous memory
 *		accesses are visible before all subsequent
 *		accesses and vice versa.  This is also known as
 *		a "fence."
 *
 * Note: "mb()" and its variants cannot be used as a fence to order
 * accesses to memory mapped I/O registers.  For that, mf.a needs to
 * be used.  However, we don't want to always use mf.a because (a)
 * it's (presumably) much slower than mf and (b) mf.a is supported for
 * sequential memory pages only.
 */
#define mb()	__asm__ __volatile__ ("mf" ::: "memory")
#define rmb()	mb()
#define wmb()	mb()
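
/*
 * Illustrative sketch (not part of the original header, compiled out):
 * a typical producer/consumer use of wmb()/rmb() as described above.
 * The names shared_data, data_ready, produce and consume are made up
 * for the example.
 */
#if 0
static int shared_data;
static volatile int data_ready;

static void
produce (int value)
{
	shared_data = value;	/* store the payload first... */
	wmb();			/* ...and make it visible before the flag */
	data_ready = 1;
}

static int
consume (void)
{
	while (!data_ready)	/* wait for the flag... */
		;
	rmb();			/* ...then order the flag read before the payload read */
	return shared_data;
}
#endif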
/*
 * XXX check on these---I suspect what Linus really wants here is
 * acquire vs release semantics but we can't discuss this stuff with
 * Linus just yet.  Grrr...
 */
#define set_mb(var, value) do { (var) = (value); mb(); } while (0)
#define set_wmb(var, value) do { (var) = (value); mb(); } while (0)

/*
 * The group barrier in front of the rsm & ssm is necessary to ensure
 * that none of the previous instructions in the same group are
 * affected by the rsm/ssm.
 */

/* For spinlocks etc */
#ifdef CONFIG_IA64_DEBUG_IRQ

  extern unsigned long last_cli_ip;

# define local_irq_save(x) \
do { \
	unsigned long ip, psr; \
	__asm__ __volatile__ ("mov %0=psr;; rsm psr.i;;" : "=r" (psr) :: "memory"); \
	if (psr & (1UL << 14)) { \
		__asm__ ("mov %0=ip" : "=r"(ip)); \
		last_cli_ip = ip; \
	} \
	(x) = psr; \
} while (0)

# define local_irq_disable() \
do { \
	unsigned long ip, psr; \
	__asm__ __volatile__ ("mov %0=psr;; rsm psr.i;;" : "=r" (psr) :: "memory"); \
	if (psr & (1UL << 14)) { \
		__asm__ ("mov %0=ip" : "=r"(ip)); \
		last_cli_ip = ip; \
	} \
} while (0)

# define local_irq_restore(x) \
do { \
	unsigned long ip, old_psr, psr = (x); \
	__asm__ __volatile__ (";;mov %0=psr; mov psr.l=%1;; srlz.d" \
			      : "=&r" (old_psr) : "r" (psr) : "memory"); \
	if ((old_psr & (1UL << 14)) && !(psr & (1UL << 14))) { \
		__asm__ ("mov %0=ip" : "=r"(ip)); \
		last_cli_ip = ip; \
	} \
} while (0)
#else /* !CONFIG_IA64_DEBUG_IRQ */
  /* clearing of psr.i is implicitly serialized (visible by next insn) */
# define local_irq_save(x)	__asm__ __volatile__ ("mov %0=psr;; rsm psr.i;;" \
						      : "=r" (x) :: "memory")
# define local_irq_disable()	__asm__ __volatile__ (";; rsm psr.i;;" ::: "memory")
  /* (potentially) setting psr.i requires data serialization: */
# define local_irq_restore(x)	__asm__ __volatile__ (";; mov psr.l=%0;; srlz.d" \
						      :: "r" (x) : "memory")
#endif /* !CONFIG_IA64_DEBUG_IRQ */

#define local_irq_enable()	__asm__ __volatile__ (";; ssm psr.i;; srlz.d" ::: "memory")

#define __cli()			local_irq_disable ()
#define __save_flags(flags)	__asm__ __volatile__ ("mov %0=psr" : "=r" (flags) :: "memory")
#define __save_and_cli(flags)	local_irq_save(flags)
#define save_and_cli(flags)	__save_and_cli(flags)
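
/*
 * Illustrative sketch (not part of the original header, compiled out):
 * the usual pattern for protecting a short critical section against
 * local interrupts.  The name counter is made up for the example.
 */
#if 0
static unsigned long counter;

static void
bump_counter (void)
{
	unsigned long flags;

	local_irq_save(flags);		/* disable interrupts, remember old psr */
	counter++;			/* critical section: no local interrupts */
	local_irq_restore(flags);	/* restore the previous interrupt state */
}
#endif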
#ifdef CONFIG_IA64_SOFTSDV_HACKS
/*
 * Yech.  SoftSDV has a slight problem with psr.i and itc/itm.  If
 * PSR.i = 0 and ITC == ITM, you don't get the timer tick posted.  So,
 * I'll check if ITC is larger than ITM here and reset if necessary.
 * I may miss a tick or two.
 *
 * Don't include asm/delay.h; it causes include loops that are
 * mind-numbingly hard to follow.
 */

#define get_itc(x) __asm__ __volatile__("mov %0=ar.itc" : "=r"((x)) :: "memory")
#define get_itm(x) __asm__ __volatile__("mov %0=cr.itm" : "=r"((x)) :: "memory")
#define set_itm(x) __asm__ __volatile__("mov cr.itm=%0" :: "r"((x)) : "memory")

#define __restore_flags(x) \
do { \
	unsigned long itc, itm; \
	local_irq_restore(x); \
	get_itc(itc); \
	get_itm(itm); \
	if (itc > itm) \
		set_itm(itc + 10); \
} while (0)

#define __sti() \
do { \
	unsigned long itc, itm; \
	local_irq_enable(); \
	get_itc(itc); \
	get_itm(itm); \
	if (itc > itm) \
		set_itm(itc + 10); \
} while (0)

#else /* !CONFIG_IA64_SOFTSDV_HACKS */

#define __sti()			local_irq_enable ()
#define __restore_flags(flags)	local_irq_restore(flags)

#endif /* !CONFIG_IA64_SOFTSDV_HACKS */
#ifdef CONFIG_SMP
  extern void __global_cli (void);
  extern void __global_sti (void);
  extern unsigned long __global_save_flags (void);
  extern void __global_restore_flags (unsigned long);
# define cli()			__global_cli()
# define sti()			__global_sti()
# define save_flags(flags)	((flags) = __global_save_flags())
# define restore_flags(flags)	__global_restore_flags(flags)
#else /* !CONFIG_SMP */
# define cli()			__cli()
# define sti()			__sti()
# define save_flags(flags)	__save_flags(flags)
# define restore_flags(flags)	__restore_flags(flags)
#endif /* !CONFIG_SMP */
/*
 * Force an unresolved reference if someone tries to use
 * ia64_fetch_and_add() with a bad value.
 */
extern unsigned long __bad_size_for_ia64_fetch_and_add (void);
extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);

#define IA64_FETCHADD(tmp,v,n,sz) \
({ \
	switch (sz) { \
	      case 4: \
		__asm__ __volatile__ ("fetchadd4.rel %0=%1,%3" \
				      : "=r"(tmp), "=m"(__atomic_fool_gcc(v)) \
				      : "m" (__atomic_fool_gcc(v)), "i"(n)); \
		break; \
	      case 8: \
		__asm__ __volatile__ ("fetchadd8.rel %0=%1,%3" \
				      : "=r"(tmp), "=m"(__atomic_fool_gcc(v)) \
				      : "m" (__atomic_fool_gcc(v)), "i"(n)); \
		break; \
	      default: \
		__bad_size_for_ia64_fetch_and_add(); \
	} \
})
#define ia64_fetch_and_add(i,v) \
({ \
	__u64 _tmp; \
	volatile __typeof__(*(v)) *_v = (v); \
	switch (i) { \
	      case -16:	IA64_FETCHADD(_tmp, _v, -16, sizeof(*(v))); break; \
	      case  -8:	IA64_FETCHADD(_tmp, _v,  -8, sizeof(*(v))); break; \
	      case  -4:	IA64_FETCHADD(_tmp, _v,  -4, sizeof(*(v))); break; \
	      case  -1:	IA64_FETCHADD(_tmp, _v,  -1, sizeof(*(v))); break; \
	      case   1:	IA64_FETCHADD(_tmp, _v,   1, sizeof(*(v))); break; \
	      case   4:	IA64_FETCHADD(_tmp, _v,   4, sizeof(*(v))); break; \
	      case   8:	IA64_FETCHADD(_tmp, _v,   8, sizeof(*(v))); break; \
	      case  16:	IA64_FETCHADD(_tmp, _v,  16, sizeof(*(v))); break; \
	      default: \
		_tmp = __bad_increment_for_ia64_fetch_and_add(); \
		break; \
	} \
	(__typeof__(*(v))) (_tmp + (i));	/* return new value */ \
})
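
/*
 * Illustrative sketch (not part of the original header, compiled out):
 * atomically bumping a 64-bit counter and using the returned (new)
 * value.  The names hits and record_hit are made up for the example.
 */
#if 0
static __u64 hits;

static __u64
record_hit (void)
{
	/* the increment must be one of the constants handled above (+/-1, 4, 8, 16) */
	return ia64_fetch_and_add(1, &hits);
}
#endif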
/*
 * This function doesn't exist, so you'll get a linker error if
 * something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer (void);

static __inline__ unsigned long
__xchg (unsigned long x, volatile void *ptr, int size)
{
	unsigned long result;

	switch (size) {
	      case 1:
		__asm__ __volatile ("xchg1 %0=%1,%2" : "=r" (result)
				    : "m" (*(char *) ptr), "r" (x) : "memory");
		return result;

	      case 2:
		__asm__ __volatile ("xchg2 %0=%1,%2" : "=r" (result)
				    : "m" (*(short *) ptr), "r" (x) : "memory");
		return result;

	      case 4:
		__asm__ __volatile ("xchg4 %0=%1,%2" : "=r" (result)
				    : "m" (*(int *) ptr), "r" (x) : "memory");
		return result;

	      case 8:
		__asm__ __volatile ("xchg8 %0=%1,%2" : "=r" (result)
				    : "m" (*(long *) ptr), "r" (x) : "memory");
		return result;
	}
	__xchg_called_with_bad_pointer();
	return x;
}

#define xchg(ptr,x) \
  ((__typeof__(*(ptr))) __xchg ((unsigned long) (x), (ptr), sizeof(*(ptr))))
#define tas(ptr) (xchg ((ptr), 1))
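
/*
 * Illustrative sketch (not part of the original header, compiled out):
 * using xchg() to atomically hand off a pointer.  The names pending and
 * take_pending are made up for the example.
 */
#if 0
static void *pending;

static void *
take_pending (void)
{
	/* atomically grab the old value and leave NULL behind */
	return xchg(&pending, NULL);
}
#endif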
/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg().
 */
extern long __cmpxchg_called_with_bad_pointer(void);

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) (*(struct __xchg_dummy *)(x))

#define ia64_cmpxchg(sem,ptr,old,new,size) \
({ \
	__typeof__(ptr) _p_ = (ptr); \
	__typeof__(new) _n_ = (new); \
	__u64 _o_, _r_; \
	switch (size) { \
	      case 1: _o_ = (__u8 ) (old); break; \
	      case 2: _o_ = (__u16) (old); break; \
	      case 4: _o_ = (__u32) (old); break; \
	      case 8: _o_ = (__u64) (old); break; \
	      default: break; \
	} \
	__asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(_o_)); \
	switch (size) { \
	      case 1: \
		__asm__ __volatile__ ("cmpxchg1."sem" %0=%2,%3,ar.ccv" \
				      : "=r"(_r_), "=m"(__xg(_p_)) \
				      : "m"(__xg(_p_)), "r"(_n_)); \
		break; \
	      case 2: \
		__asm__ __volatile__ ("cmpxchg2."sem" %0=%2,%3,ar.ccv" \
				      : "=r"(_r_), "=m"(__xg(_p_)) \
				      : "m"(__xg(_p_)), "r"(_n_)); \
		break; \
	      case 4: \
		__asm__ __volatile__ ("cmpxchg4."sem" %0=%2,%3,ar.ccv" \
				      : "=r"(_r_), "=m"(__xg(_p_)) \
				      : "m"(__xg(_p_)), "r"(_n_)); \
		break; \
	      case 8: \
		__asm__ __volatile__ ("cmpxchg8."sem" %0=%2,%3,ar.ccv" \
				      : "=r"(_r_), "=m"(__xg(_p_)) \
				      : "m"(__xg(_p_)), "r"(_n_)); \
		break; \
	      default: \
		_r_ = __cmpxchg_called_with_bad_pointer(); \
		break; \
	} \
	(__typeof__(old)) _r_; \
})

#define cmpxchg_acq(ptr,o,n)	ia64_cmpxchg("acq", (ptr), (o), (n), sizeof(*(ptr)))
#define cmpxchg_rel(ptr,o,n)	ia64_cmpxchg("rel", (ptr), (o), (n), sizeof(*(ptr)))

/* for compatibility with other platforms: */
#define cmpxchg(ptr,o,n)	cmpxchg_acq(ptr,o,n)
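
/*
 * Illustrative sketch (not part of the original header, compiled out):
 * the classic compare-and-swap retry loop built on cmpxchg().  The
 * names val and atomic_or_bits are made up for the example.
 */
#if 0
static volatile __u32 val;

static void
atomic_or_bits (__u32 bits)
{
	__u32 old_val, new_val;

	do {
		old_val = val;			/* snapshot the current value */
		new_val = old_val | bits;	/* compute the desired value */
		/* success iff the returned value equals the snapshot */
	} while (cmpxchg(&val, old_val, new_val) != old_val);
}
#endif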
#ifdef CONFIG_IA64_DEBUG_CMPXCHG
# define CMPXCHG_BUGCHECK_DECL	int _cmpxchg_bugcheck_count = 128;
# define CMPXCHG_BUGCHECK(v) \
  do { \
	if (_cmpxchg_bugcheck_count-- <= 0) { \
		void *ip; \
		extern int printk(const char *fmt, ...); \
		asm ("mov %0=ip" : "=r"(ip)); \
		printk("CMPXCHG_BUGCHECK: stuck at %p on word %p\n", ip, (v)); \
		break; \
	} \
  } while (0)
#else /* !CONFIG_IA64_DEBUG_CMPXCHG */
# define CMPXCHG_BUGCHECK_DECL
# define CMPXCHG_BUGCHECK(v)
#endif /* !CONFIG_IA64_DEBUG_CMPXCHG */
#ifdef __KERNEL__

#define prepare_to_switch()	do { } while(0)

#ifdef CONFIG_IA32_SUPPORT
# define IS_IA32_PROCESS(regs)	(ia64_psr(regs)->is != 0)
#else
# define IS_IA32_PROCESS(regs)	0
#endif
/*
 * Context switch from one thread to another.  If the two threads have
 * different address spaces, schedule() has already taken care of
 * switching to the new address space by calling switch_mm().
 *
 * Disabling access to the fph partition and the debug-register
 * context switch MUST be done before calling ia64_switch_to() since a
 * newly created thread returns directly to
 * ia64_ret_from_syscall_clear_r8.
 */
extern struct task_struct *ia64_switch_to (void *next_task);

extern void ia64_save_extra (struct task_struct *task);
extern void ia64_load_extra (struct task_struct *task);

#define __switch_to(prev,next,last) do { \
	if (((prev)->thread.flags & IA64_THREAD_DBG_VALID) \
	    || IS_IA32_PROCESS(ia64_task_regs(prev))) \
		ia64_save_extra(prev); \
	if (((next)->thread.flags & IA64_THREAD_DBG_VALID) \
	    || IS_IA32_PROCESS(ia64_task_regs(next))) \
		ia64_load_extra(next); \
	ia64_psr(ia64_task_regs(next))->dfh = (ia64_get_fpu_owner() != (next)); \
	(last) = ia64_switch_to((next)); \
} while (0)
#ifdef CONFIG_SMP
/*
 * In the SMP case, we save the fph state when context-switching
 * away from a thread that owned and modified fph.  This way, when
 * the thread gets scheduled on another CPU, the CPU can pick up the
 * state from task->thread.fph, avoiding the complication of having
 * to fetch the latest fph state from another CPU.  If the thread
 * happens to be rescheduled on the same CPU later on and nobody
 * else has touched the FPU in the meantime, the thread will fault
 * upon the first access to fph but since the state in fph is still
 * valid, no other overheads are incurred.  In other words, CPU
 * affinity is a Good Thing.
 */
# define switch_to(prev,next,last) do { \
	if (ia64_get_fpu_owner() == (prev) && ia64_psr(ia64_task_regs(prev))->mfh) { \
		(prev)->thread.flags |= IA64_THREAD_FPH_VALID; \
		__ia64_save_fpu((prev)->thread.fph); \
	} \
	__switch_to(prev,next,last); \
} while (0)
#else
# define switch_to(prev,next,last)	__switch_to(prev,next,last)
#endif

#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */

#endif /* _ASM_IA64_SYSTEM_H */