#ifndef _ASM_IA64_PROCESSOR_H
#define _ASM_IA64_PROCESSOR_H

/*
 * Copyright (C) 1998-2000 Hewlett-Packard Co
 * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1998, 1999 Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 *
 * 11/24/98	S.Eranian	added ia64_set_iva()
 * 12/03/99	D. Mosberger	implement thread_saved_pc() via kernel unwind API
 * 06/16/00	A. Mallick	added csd/ssd/tssd for ia32 support
 */

#include <linux/config.h>

#include <asm/ptrace.h>
#include <asm/types.h>

#define IA64_NUM_DBG_REGS	8
#define IA64_NUM_PM_REGS	4
/*
 * TASK_SIZE is really a misnomer: it is actually the maximum user
 * space address (plus one).  On ia-64, there are five regions of 2TB
 * each (assuming 8KB page size), for a total of 8TB of user virtual
 * address space.
 */
#define TASK_SIZE		0xa000000000000000

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(current->thread.map_base)
/*
 * Bus types
 */
#define EISA_bus 0
#define EISA_bus__is_a_macro	/* for versions in ksyms.c */
#define MCA_bus 0
#define MCA_bus__is_a_macro	/* for versions in ksyms.c */
/* Processor status register bits: */
#define IA64_PSR_BE_BIT		1
#define IA64_PSR_UP_BIT		2
#define IA64_PSR_AC_BIT		3
#define IA64_PSR_MFL_BIT	4
#define IA64_PSR_MFH_BIT	5
#define IA64_PSR_IC_BIT		13
#define IA64_PSR_I_BIT		14
#define IA64_PSR_PK_BIT		15
#define IA64_PSR_DT_BIT		17
#define IA64_PSR_DFL_BIT	18
#define IA64_PSR_DFH_BIT	19
#define IA64_PSR_SP_BIT		20
#define IA64_PSR_PP_BIT		21
#define IA64_PSR_DI_BIT		22
#define IA64_PSR_SI_BIT		23
#define IA64_PSR_DB_BIT		24
#define IA64_PSR_LP_BIT		25
#define IA64_PSR_TB_BIT		26
#define IA64_PSR_RT_BIT		27
/* The following are not affected by save_flags()/restore_flags(): */
#define IA64_PSR_CPL0_BIT	32
#define IA64_PSR_CPL1_BIT	33
#define IA64_PSR_IS_BIT		34
#define IA64_PSR_MC_BIT		35
#define IA64_PSR_IT_BIT		36
#define IA64_PSR_ID_BIT		37
#define IA64_PSR_DA_BIT		38
#define IA64_PSR_DD_BIT		39
#define IA64_PSR_SS_BIT		40
#define IA64_PSR_RI_BIT		41
#define IA64_PSR_ED_BIT		43
#define IA64_PSR_BN_BIT		44

#define IA64_PSR_BE	(__IA64_UL(1) << IA64_PSR_BE_BIT)
#define IA64_PSR_UP	(__IA64_UL(1) << IA64_PSR_UP_BIT)
#define IA64_PSR_AC	(__IA64_UL(1) << IA64_PSR_AC_BIT)
#define IA64_PSR_MFL	(__IA64_UL(1) << IA64_PSR_MFL_BIT)
#define IA64_PSR_MFH	(__IA64_UL(1) << IA64_PSR_MFH_BIT)
#define IA64_PSR_IC	(__IA64_UL(1) << IA64_PSR_IC_BIT)
#define IA64_PSR_I	(__IA64_UL(1) << IA64_PSR_I_BIT)
#define IA64_PSR_PK	(__IA64_UL(1) << IA64_PSR_PK_BIT)
#define IA64_PSR_DT	(__IA64_UL(1) << IA64_PSR_DT_BIT)
#define IA64_PSR_DFL	(__IA64_UL(1) << IA64_PSR_DFL_BIT)
#define IA64_PSR_DFH	(__IA64_UL(1) << IA64_PSR_DFH_BIT)
#define IA64_PSR_SP	(__IA64_UL(1) << IA64_PSR_SP_BIT)
#define IA64_PSR_PP	(__IA64_UL(1) << IA64_PSR_PP_BIT)
#define IA64_PSR_DI	(__IA64_UL(1) << IA64_PSR_DI_BIT)
#define IA64_PSR_SI	(__IA64_UL(1) << IA64_PSR_SI_BIT)
#define IA64_PSR_DB	(__IA64_UL(1) << IA64_PSR_DB_BIT)
#define IA64_PSR_LP	(__IA64_UL(1) << IA64_PSR_LP_BIT)
#define IA64_PSR_TB	(__IA64_UL(1) << IA64_PSR_TB_BIT)
#define IA64_PSR_RT	(__IA64_UL(1) << IA64_PSR_RT_BIT)
/* The following are not affected by save_flags()/restore_flags(): */
#define IA64_PSR_IS	(__IA64_UL(1) << IA64_PSR_IS_BIT)
#define IA64_PSR_MC	(__IA64_UL(1) << IA64_PSR_MC_BIT)
#define IA64_PSR_IT	(__IA64_UL(1) << IA64_PSR_IT_BIT)
#define IA64_PSR_ID	(__IA64_UL(1) << IA64_PSR_ID_BIT)
#define IA64_PSR_DA	(__IA64_UL(1) << IA64_PSR_DA_BIT)
#define IA64_PSR_DD	(__IA64_UL(1) << IA64_PSR_DD_BIT)
#define IA64_PSR_SS	(__IA64_UL(1) << IA64_PSR_SS_BIT)
#define IA64_PSR_RI	(__IA64_UL(3) << IA64_PSR_RI_BIT)
#define IA64_PSR_ED	(__IA64_UL(1) << IA64_PSR_ED_BIT)
#define IA64_PSR_BN	(__IA64_UL(1) << IA64_PSR_BN_BIT)

/* User mask bits: */
#define IA64_PSR_UM	(IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL | IA64_PSR_MFH)
/* Default Control Register */
#define IA64_DCR_PP_BIT		0	/* privileged performance monitor default */
#define IA64_DCR_BE_BIT		1	/* big-endian default */
#define IA64_DCR_LC_BIT		2	/* ia32 lock-check enable */
#define IA64_DCR_DM_BIT		8	/* defer TLB miss faults */
#define IA64_DCR_DP_BIT		9	/* defer page-not-present faults */
#define IA64_DCR_DK_BIT		10	/* defer key miss faults */
#define IA64_DCR_DX_BIT		11	/* defer key permission faults */
#define IA64_DCR_DR_BIT		12	/* defer access right faults */
#define IA64_DCR_DA_BIT		13	/* defer access bit faults */
#define IA64_DCR_DD_BIT		14	/* defer debug faults */

#define IA64_DCR_PP	(__IA64_UL(1) << IA64_DCR_PP_BIT)
#define IA64_DCR_BE	(__IA64_UL(1) << IA64_DCR_BE_BIT)
#define IA64_DCR_LC	(__IA64_UL(1) << IA64_DCR_LC_BIT)
#define IA64_DCR_DM	(__IA64_UL(1) << IA64_DCR_DM_BIT)
#define IA64_DCR_DP	(__IA64_UL(1) << IA64_DCR_DP_BIT)
#define IA64_DCR_DK	(__IA64_UL(1) << IA64_DCR_DK_BIT)
#define IA64_DCR_DX	(__IA64_UL(1) << IA64_DCR_DX_BIT)
#define IA64_DCR_DR	(__IA64_UL(1) << IA64_DCR_DR_BIT)
#define IA64_DCR_DA	(__IA64_UL(1) << IA64_DCR_DA_BIT)
#define IA64_DCR_DD	(__IA64_UL(1) << IA64_DCR_DD_BIT)
/* Interrupt Status Register */
#define IA64_ISR_X_BIT		32	/* execute access */
#define IA64_ISR_W_BIT		33	/* write access */
#define IA64_ISR_R_BIT		34	/* read access */
#define IA64_ISR_NA_BIT		35	/* non-access */
#define IA64_ISR_SP_BIT		36	/* speculative load exception */
#define IA64_ISR_RS_BIT		37	/* mandatory register-stack exception */
#define IA64_ISR_IR_BIT		38	/* invalid register frame exception */

#define IA64_ISR_X	(__IA64_UL(1) << IA64_ISR_X_BIT)
#define IA64_ISR_W	(__IA64_UL(1) << IA64_ISR_W_BIT)
#define IA64_ISR_R	(__IA64_UL(1) << IA64_ISR_R_BIT)
#define IA64_ISR_NA	(__IA64_UL(1) << IA64_ISR_NA_BIT)
#define IA64_ISR_SP	(__IA64_UL(1) << IA64_ISR_SP_BIT)
#define IA64_ISR_RS	(__IA64_UL(1) << IA64_ISR_RS_BIT)
#define IA64_ISR_IR	(__IA64_UL(1) << IA64_ISR_IR_BIT)
#define IA64_THREAD_FPH_VALID	(__IA64_UL(1) << 0)	/* floating-point high state valid? */
#define IA64_THREAD_DBG_VALID	(__IA64_UL(1) << 1)	/* debug registers valid? */
#define IA64_THREAD_PM_VALID	(__IA64_UL(1) << 2)	/* performance registers valid? */
#define IA64_THREAD_UAC_NOPRINT	(__IA64_UL(1) << 3)	/* don't log unaligned accesses */
#define IA64_THREAD_UAC_SIGBUS	(__IA64_UL(1) << 4)	/* generate SIGBUS on unaligned acc. */
#define IA64_THREAD_KRBS_SYNCED	(__IA64_UL(1) << 5)	/* krbs synced with process vm? */
#define IA64_KERNEL_DEATH	(__IA64_UL(1) << 63)	/* see die_if_kernel()... */

#define IA64_THREAD_UAC_SHIFT	3
#define IA64_THREAD_UAC_MASK	(IA64_THREAD_UAC_NOPRINT | IA64_THREAD_UAC_SIGBUS)
#ifndef __ASSEMBLY__

#include <linux/smp.h>
#include <linux/threads.h>

#include <asm/fpu.h>
#include <asm/offsets.h>
#include <asm/page.h>
#include <asm/rse.h>
#include <asm/unwind.h>
/* like above but expressed as bitfields for more efficient access: */
struct ia64_psr {
	__u64 reserved0 : 1;
	__u64 be : 1;
	__u64 up : 1;
	__u64 ac : 1;
	__u64 mfl : 1;
	__u64 mfh : 1;
	__u64 reserved1 : 7;
	__u64 ic : 1;
	__u64 i : 1;
	__u64 pk : 1;
	__u64 reserved2 : 1;
	__u64 dt : 1;
	__u64 dfl : 1;
	__u64 dfh : 1;
	__u64 sp : 1;
	__u64 pp : 1;
	__u64 di : 1;
	__u64 si : 1;
	__u64 db : 1;
	__u64 lp : 1;
	__u64 tb : 1;
	__u64 rt : 1;
	__u64 reserved3 : 4;
	__u64 cpl : 2;
	__u64 is : 1;
	__u64 mc : 1;
	__u64 it : 1;
	__u64 id : 1;
	__u64 da : 1;
	__u64 dd : 1;
	__u64 ss : 1;
	__u64 ri : 2;
	__u64 ed : 1;
	__u64 bn : 1;
	__u64 reserved4 : 19;
};
/*
 * This shift should be large enough to be able to represent
 * 1000000/itc_freq with good accuracy while being small enough to fit
 * 1000000<<IA64_USEC_PER_CYC_SHIFT in 64 bits.
 */
#define IA64_USEC_PER_CYC_SHIFT	41
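
/*
 * Illustrative sketch (not part of this header): the usec_per_cyc field of
 * struct cpuinfo_ia64 below is a fixed-point value scaled by
 * 2^IA64_USEC_PER_CYC_SHIFT, so an ITC cycle delta would be converted to
 * microseconds roughly like this (example_cycles_to_usec is a hypothetical
 * helper, shown only to make the scaling explicit):
 *
 *	static inline __u64
 *	example_cycles_to_usec (__u64 cycles)
 *	{
 *		return (cycles * my_cpu_data.usec_per_cyc) >> IA64_USEC_PER_CYC_SHIFT;
 *	}
 */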
/*
 * CPU type, hardware bug flags, and per-CPU state.
 */
struct cpuinfo_ia64 {
	__u64 *pgd_quick;
	__u64 *pmd_quick;
	__u64 *pte_quick;
	__u64 pgtable_cache_sz;
	/* CPUID-derived information: */
	__u64 ppn;
	__u64 features;
	__u8 number;
	__u8 revision;
	__u8 model;
	__u8 family;
	__u8 archrev;
	char vendor[16];
	__u64 itc_freq;		/* frequency of ITC counter */
	__u64 proc_freq;	/* frequency of processor */
	__u64 cyc_per_usec;	/* itc_freq/1000000 */
	__u64 usec_per_cyc;	/* 2^IA64_USEC_PER_CYC_SHIFT*1000000/itc_freq */
	__u64 unimpl_va_mask;	/* mask of unimplemented virtual address bits (from PAL) */
	__u64 unimpl_pa_mask;	/* mask of unimplemented physical address bits (from PAL) */
#ifdef CONFIG_SMP
	__u64 loops_per_sec;
	__u64 ipi_count;
	__u64 prof_counter;
	__u64 prof_multiplier;
#endif
};
#define my_cpu_data		cpu_data[smp_processor_id()]

#ifdef CONFIG_SMP
# define ia64_loops_per_sec()	my_cpu_data.loops_per_sec
#else
# define ia64_loops_per_sec()	loops_per_sec
#endif

extern struct cpuinfo_ia64 cpu_data[NR_CPUS];

extern void identify_cpu (struct cpuinfo_ia64 *);
extern void print_cpu_info (struct cpuinfo_ia64 *);

typedef struct {
	unsigned long seg;
} mm_segment_t;
#define SET_UNALIGN_CTL(task,value)								\
({												\
	(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_UAC_MASK)			\
				| (((value) << IA64_THREAD_UAC_SHIFT) & IA64_THREAD_UAC_MASK));	\
	0;											\
})

#define GET_UNALIGN_CTL(task,addr)								\
({												\
	put_user(((task)->thread.flags & IA64_THREAD_UAC_MASK) >> IA64_THREAD_UAC_SHIFT,	\
		 (int *) (addr));								\
})
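
/*
 * Illustrative sketch (not part of this header): the generic prctl() code is
 * the intended user of these macros.  Roughly, PR_SET_UNALIGN ends up doing
 * something like
 *
 *	error = SET_UNALIGN_CTL(current, arg2);
 *
 * and PR_GET_UNALIGN writes the current setting back to user space via
 *
 *	error = GET_UNALIGN_CTL(current, arg2);
 *
 * where arg2 carries the IA64_THREAD_UAC_* bits shifted down by
 * IA64_THREAD_UAC_SHIFT.
 */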
struct siginfo;

struct thread_struct {
	__u64 ksp;			/* kernel stack pointer */
	unsigned long flags;		/* various flags */
	struct ia64_fpreg fph[96];	/* saved/loaded on demand */
	__u64 dbr[IA64_NUM_DBG_REGS];
	__u64 ibr[IA64_NUM_DBG_REGS];
#ifdef CONFIG_PERFMON
	__u64 pmc[IA64_NUM_PM_REGS];
	__u64 pmd[IA64_NUM_PM_REGS];
	__u64 pmod[IA64_NUM_PM_REGS];
# define INIT_THREAD_PM		{0, }, {0, }, {0, },
#else
# define INIT_THREAD_PM
#endif
	__u64 map_base;			/* base address for mmap() */
#ifdef CONFIG_IA32_SUPPORT
	__u64 eflag;			/* IA32 EFLAGS reg */
	__u64 fsr;			/* IA32 floating pt status reg */
	__u64 fcr;			/* IA32 floating pt control reg */
	__u64 fir;			/* IA32 fp except. instr. reg */
	__u64 fdr;			/* IA32 fp except. data reg */
	__u64 csd;			/* IA32 code selector descriptor */
	__u64 ssd;			/* IA32 stack selector descriptor */
	__u64 tssd;			/* IA32 TSS descriptor */
	__u64 old_iob;			/* old IOBase value */
	union {
		__u64 sigmask;		/* aligned mask for sigsuspend scall */
	} un;
# define INIT_THREAD_IA32	, 0, 0, 0x17800000037fULL, 0, 0, 0, 0, 0, 0, {0}
#else
# define INIT_THREAD_IA32
#endif /* CONFIG_IA32_SUPPORT */
	struct siginfo *siginfo;	/* current siginfo struct for ptrace() */
};
#define INIT_MMAP {								\
	&init_mm, PAGE_OFFSET, PAGE_OFFSET + 0x10000000, NULL, PAGE_SHARED,	\
	VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL				\
}

#define INIT_THREAD {					\
	0,				/* ksp */	\
	0,				/* flags */	\
	{{{{0}}}, },			/* fph */	\
	{0, },				/* dbr */	\
	{0, },				/* ibr */	\
	INIT_THREAD_PM					\
	0x2000000000000000		/* map_base */	\
	INIT_THREAD_IA32,				\
	0				/* siginfo */	\
}
#define start_thread(regs,new_ip,new_sp) do {					\
	set_fs(USER_DS);							\
	ia64_psr(regs)->dfh = 1;	/* disable fph */			\
	ia64_psr(regs)->mfh = 0;	/* clear mfh */				\
	ia64_psr(regs)->cpl = 3;	/* set user mode */			\
	ia64_psr(regs)->ri = 0;		/* clear return slot number */		\
	ia64_psr(regs)->is = 0;		/* IA-64 instruction set */		\
	regs->cr_iip = new_ip;							\
	regs->ar_rsc = 0xf;		/* eager mode, privilege level 3 */	\
	regs->r12 = new_sp - 16;	/* allocate 16 byte scratch area */	\
	regs->ar_bspstore = IA64_RBS_BOT;					\
	regs->ar_rnat = 0;							\
	regs->loadrs = 0;							\
} while (0)
/* Forward declarations, a strange C thing... */
struct mm_struct;
struct task_struct;

/*
 * Free all resources held by a thread.  This is called after the
 * parent of DEAD_TASK has collected the exit status of the task via
 * wait().  This is a no-op on IA-64.
 */
#define release_thread(dead_task)

/*
 * This is the mechanism for creating a new kernel thread.
 *
 * NOTE 1: Only a kernel-only process (i.e., the swapper or direct
 * descendants that haven't done an "execve()") should use this: it
 * will work within a system call from a "real" process, but the
 * process memory space will not be freed until both the parent and
 * the child have exited.
 *
 * NOTE 2: This MUST NOT be an inlined function.  Otherwise, we get
 * into trouble in init/main.c when the child thread returns to
 * do_basic_setup() and the timing is such that free_initmem() has
 * been called already.
 */
extern int kernel_thread (int (*fn)(void *), void *arg, unsigned long flags);
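
/*
 * Illustrative sketch (not part of this header): a boot-time kernel daemon
 * would typically be spawned from a kernel-only context roughly like this
 * (my_kthread is a hypothetical function, used only for illustration):
 *
 *	static int
 *	my_kthread (void *arg)
 *	{
 *		daemonize();
 *		for (;;) {
 *			// do work, then sleep...
 *		}
 *		return 0;
 *	}
 *
 *	kernel_thread(my_kthread, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
 */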
/* Get wait channel for task P.  */
extern unsigned long get_wchan (struct task_struct *p);

/* Return instruction pointer of blocked task TSK.  */
#define KSTK_EIP(tsk)					\
({							\
	struct pt_regs *_regs = ia64_task_regs(tsk);	\
	_regs->cr_iip + ia64_psr(_regs)->ri;		\
})

/* Return stack pointer of blocked task TSK.  */
#define KSTK_ESP(tsk)	((tsk)->thread.ksp)
#ifndef CONFIG_SMP

static inline struct task_struct *
ia64_get_fpu_owner (void)
{
	struct task_struct *t;
	__asm__ ("mov %0=ar.k5" : "=r"(t));
	return t;
}

static inline void
ia64_set_fpu_owner (struct task_struct *t)
{
	__asm__ __volatile__ ("mov ar.k5=%0" :: "r"(t));
}

#endif /* !CONFIG_SMP */
extern void __ia64_init_fpu (void);
extern void __ia64_save_fpu (struct ia64_fpreg *fph);
extern void __ia64_load_fpu (struct ia64_fpreg *fph);
extern void ia64_save_debug_regs (unsigned long *save_area);
extern void ia64_load_debug_regs (unsigned long *save_area);

#ifdef CONFIG_IA32_SUPPORT
extern void ia32_save_state (struct thread_struct *thread);
extern void ia32_load_state (struct thread_struct *thread);
#endif

#ifdef CONFIG_PERFMON
extern void ia64_save_pm_regs (struct thread_struct *thread);
extern void ia64_load_pm_regs (struct thread_struct *thread);
#endif

#define ia64_fph_enable()	__asm__ __volatile__ (";; rsm psr.dfh;; srlz.d;;" ::: "memory");
#define ia64_fph_disable()	__asm__ __volatile__ (";; ssm psr.dfh;; srlz.d;;" ::: "memory");
/* load fp 0.0 into fph */
static inline void
ia64_init_fpu (void) {
	ia64_fph_enable();
	__ia64_init_fpu();
	ia64_fph_disable();
}

/* save f32-f127 at FPH */
static inline void
ia64_save_fpu (struct ia64_fpreg *fph) {
	ia64_fph_enable();
	__ia64_save_fpu(fph);
	ia64_fph_disable();
}

/* load f32-f127 from FPH */
static inline void
ia64_load_fpu (struct ia64_fpreg *fph) {
	ia64_fph_enable();
	__ia64_load_fpu(fph);
	ia64_fph_disable();
}
static inline void
ia64_fc (void *addr)
{
	__asm__ __volatile__ ("fc %0" :: "r"(addr) : "memory");
}

static inline void
ia64_sync_i (void)
{
	__asm__ __volatile__ (";; sync.i" ::: "memory");
}

static inline void
ia64_srlz_i (void)
{
	__asm__ __volatile__ (";; srlz.i ;;" ::: "memory");
}

static inline void
ia64_srlz_d (void)
{
	__asm__ __volatile__ (";; srlz.d" ::: "memory");
}

static inline __u64
ia64_get_rr (__u64 reg_bits)
{
	__u64 r;
	__asm__ __volatile__ ("mov %0=rr[%1]" : "=r"(r) : "r"(reg_bits) : "memory");
	return r;
}

static inline void
ia64_set_rr (__u64 reg_bits, __u64 rr_val)
{
	__asm__ __volatile__ ("mov rr[%0]=%1" :: "r"(reg_bits), "r"(rr_val) : "memory");
}

static inline __u64
ia64_get_dcr (void)
{
	__u64 r;
	__asm__ ("mov %0=cr.dcr" : "=r"(r));
	return r;
}

static inline void
ia64_set_dcr (__u64 val)
{
	__asm__ __volatile__ ("mov cr.dcr=%0;;" :: "r"(val) : "memory");
	ia64_srlz_d();
}

static inline __u64
ia64_get_lid (void)
{
	__u64 r;
	__asm__ ("mov %0=cr.lid" : "=r"(r));
	return r;
}

static inline void
ia64_invala (void)
{
	__asm__ __volatile__ ("invala" ::: "memory");
}

/*
 * Save the processor status flags in FLAGS and then clear the
 * interrupt collection and interrupt enable bits.
 */
#define ia64_clear_ic(flags)							\
	__asm__ __volatile__ ("mov %0=psr;; rsm psr.i | psr.ic;; srlz.i;;"	\
			      : "=r"(flags) :: "memory");
/*
 * Insert a translation into an instruction and/or data translation
 * register.
 */
static inline void
ia64_itr (__u64 target_mask, __u64 tr_num,
	  __u64 vmaddr, __u64 pte,
	  __u64 log_page_size)
{
	__asm__ __volatile__ ("mov cr.itir=%0" :: "r"(log_page_size << 2) : "memory");
	__asm__ __volatile__ ("mov cr.ifa=%0;;" :: "r"(vmaddr) : "memory");
	if (target_mask & 0x1)
		__asm__ __volatile__ ("itr.i itr[%0]=%1"
				      :: "r"(tr_num), "r"(pte) : "memory");
	if (target_mask & 0x2)
		__asm__ __volatile__ (";;itr.d dtr[%0]=%1"
				      :: "r"(tr_num), "r"(pte) : "memory");
}

/*
 * Insert a translation into the instruction and/or data translation
 * cache.
 */
static inline void
ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte,
	  __u64 log_page_size)
{
	__asm__ __volatile__ ("mov cr.itir=%0" :: "r"(log_page_size << 2) : "memory");
	__asm__ __volatile__ ("mov cr.ifa=%0;;" :: "r"(vmaddr) : "memory");
	/* as per EAS2.6, itc must be the last instruction in an instruction group */
	if (target_mask & 0x1)
		__asm__ __volatile__ ("itc.i %0;;" :: "r"(pte) : "memory");
	if (target_mask & 0x2)
		__asm__ __volatile__ (";;itc.d %0;;" :: "r"(pte) : "memory");
}

/*
 * Purge a range of addresses from instruction and/or data translation
 * register(s).
 */
static inline void
ia64_ptr (__u64 target_mask, __u64 vmaddr, __u64 log_size)
{
	if (target_mask & 0x1)
		__asm__ __volatile__ ("ptr.i %0,%1" :: "r"(vmaddr), "r"(log_size << 2));
	if (target_mask & 0x2)
		__asm__ __volatile__ ("ptr.d %0,%1" :: "r"(vmaddr), "r"(log_size << 2));
}
/* Set the interrupt vector address.  The address must be suitably aligned (32KB). */
static inline void
ia64_set_iva (void *ivt_addr)
{
	__asm__ __volatile__ ("mov cr.iva=%0;; srlz.i;;" :: "r"(ivt_addr) : "memory");
}

/* Set the page table address and control bits. */
static inline void
ia64_set_pta (__u64 pta)
{
	/* Note: srlz.i implies srlz.d */
	__asm__ __volatile__ ("mov cr.pta=%0;; srlz.i;;" :: "r"(pta) : "memory");
}

static inline __u64
ia64_get_cpuid (__u64 regnum)
{
	__u64 r;

	__asm__ ("mov %0=cpuid[%r1]" : "=r"(r) : "rO"(regnum));
	return r;
}
static inline void
ia64_eoi (void)
{
	__asm__ ("mov cr.eoi=r0;; srlz.d;;" ::: "memory");
}

static inline void
ia64_set_lrr0 (__u8 vector, __u8 masked)
{
	if (masked > 1)
		masked = 1;

	__asm__ __volatile__ ("mov cr.lrr0=%0;; srlz.d"
			      :: "r"((masked << 16) | vector) : "memory");
}

static inline void
ia64_set_lrr1 (__u8 vector, __u8 masked)
{
	if (masked > 1)
		masked = 1;

	__asm__ __volatile__ ("mov cr.lrr1=%0;; srlz.d"
			      :: "r"((masked << 16) | vector) : "memory");
}

static inline void
ia64_set_pmv (__u64 val)
{
	__asm__ __volatile__ ("mov cr.pmv=%0" :: "r"(val) : "memory");
}

static inline __u64
ia64_get_pmc (__u64 regnum)
{
	__u64 retval;

	__asm__ __volatile__ ("mov %0=pmc[%1]" : "=r"(retval) : "r"(regnum));
	return retval;
}

static inline void
ia64_set_pmc (__u64 regnum, __u64 value)
{
	__asm__ __volatile__ ("mov pmc[%0]=%1" :: "r"(regnum), "r"(value));
}

static inline __u64
ia64_get_pmd (__u64 regnum)
{
	__u64 retval;

	__asm__ __volatile__ ("mov %0=pmd[%1]" : "=r"(retval) : "r"(regnum));
	return retval;
}

static inline void
ia64_set_pmd (__u64 regnum, __u64 value)
{
	__asm__ __volatile__ ("mov pmd[%0]=%1" :: "r"(regnum), "r"(value));
}
/*
 * Given the address to which a spill occurred, return the unat bit
 * number that corresponds to this address.
 */
static inline __u64
ia64_unat_pos (void *spill_addr)
{
	return ((__u64) spill_addr >> 3) & 0x3f;
}

/*
 * Set the NaT bit of an integer register which was spilled at address
 * SPILL_ADDR.  UNAT is the mask to be updated.
 */
static inline void
ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat)
{
	__u64 bit = ia64_unat_pos(spill_addr);
	__u64 mask = 1UL << bit;

	*unat = (*unat & ~mask) | (nat << bit);
}
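
/*
 * Worked example (illustrative only): bits 3..8 of the spill address select
 * the UNAT bit, so a spill to an address whose low 9 bits are 0x0f8 maps to
 * bit (0x0f8 >> 3) & 0x3f = 31, and
 *
 *	ia64_set_unat(&unat, spill_addr, 1);
 *
 * would set bit 31 of "unat" (after first clearing it).
 */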
/*
 * Return saved PC of a blocked thread.
 * Note that the only way T can block is through a call to schedule() -> switch_to().
 */
static inline unsigned long
thread_saved_pc (struct thread_struct *t)
{
	struct unw_frame_info info;
	unsigned long ip;

	/* XXX ouch: Linus, please pass the task pointer to thread_saved_pc() instead!  */
	struct task_struct *p = (void *) ((unsigned long) t - IA64_TASK_THREAD_OFFSET);

	unw_init_from_blocked_task(&info, p);
	if (unw_unwind(&info) < 0)
		return 0;
	unw_get_ip(&info, &ip);
	return ip;
}
/*
 * Get the current instruction/program counter value.
 */
#define current_text_addr() \
	({ void *_pc; __asm__ ("mov %0=ip" : "=r" (_pc)); _pc; })

#define THREAD_SIZE	IA64_STK_OFFSET
/* NOTE: The task struct and the stacks are allocated together.  */
#define alloc_task_struct() \
	((struct task_struct *) __get_free_pages(GFP_KERNEL, IA64_TASK_STRUCT_LOG_NUM_PAGES))
#define free_task_struct(p)	free_pages((unsigned long)(p), IA64_TASK_STRUCT_LOG_NUM_PAGES)
#define get_task_struct(tsk)	atomic_inc(&virt_to_page(tsk)->count)

#define init_task	(init_task_union.task)
#define init_stack	(init_task_union.stack)
/*
 * Set the correctable machine check vector register
 */
static inline void
ia64_set_cmcv (__u64 val)
{
	__asm__ __volatile__ ("mov cr.cmcv=%0" :: "r"(val) : "memory");
}

/*
 * Read the correctable machine check vector register
 */
static inline __u64
ia64_get_cmcv (void)
{
	__u64 val;

	__asm__ ("mov %0=cr.cmcv" : "=r"(val) :: "memory");
	return val;
}

static inline __u64
ia64_get_ivr (void)
{
	__u64 r;
	__asm__ __volatile__ ("srlz.d;; mov %0=cr.ivr;; srlz.d;;" : "=r"(r));
	return r;
}
static inline void
ia64_set_tpr (__u64 val)
{
	__asm__ __volatile__ ("mov cr.tpr=%0" :: "r"(val));
}

static inline __u64
ia64_get_tpr (void)
{
	__u64 r;
	__asm__ ("mov %0=cr.tpr" : "=r"(r));
	return r;
}

static inline void
ia64_set_irr0 (__u64 val)
{
	__asm__ __volatile__("mov cr.irr0=%0;;" :: "r"(val) : "memory");
	ia64_srlz_d();
}

static inline __u64
ia64_get_irr0 (void)
{
	__u64 val;

	/* this is volatile because irr may change unbeknownst to gcc... */
	__asm__ __volatile__("mov %0=cr.irr0" : "=r"(val));
	return val;
}

static inline void
ia64_set_irr1 (__u64 val)
{
	__asm__ __volatile__("mov cr.irr1=%0;;" :: "r"(val) : "memory");
	ia64_srlz_d();
}

static inline __u64
ia64_get_irr1 (void)
{
	__u64 val;

	/* this is volatile because irr may change unbeknownst to gcc... */
	__asm__ __volatile__("mov %0=cr.irr1" : "=r"(val));
	return val;
}

static inline void
ia64_set_irr2 (__u64 val)
{
	__asm__ __volatile__("mov cr.irr2=%0;;" :: "r"(val) : "memory");
	ia64_srlz_d();
}

static inline __u64
ia64_get_irr2 (void)
{
	__u64 val;

	/* this is volatile because irr may change unbeknownst to gcc... */
	__asm__ __volatile__("mov %0=cr.irr2" : "=r"(val));
	return val;
}

static inline void
ia64_set_irr3 (__u64 val)
{
	__asm__ __volatile__("mov cr.irr3=%0;;" :: "r"(val) : "memory");
	ia64_srlz_d();
}

static inline __u64
ia64_get_irr3 (void)
{
	__u64 val;

	/* this is volatile because irr may change unbeknownst to gcc... */
	__asm__ __volatile__("mov %0=cr.irr3" : "=r"(val));
	return val;
}

static inline __u64
ia64_get_gp(void)
{
	__u64 val;

	__asm__ ("mov %0=gp" : "=r"(val));
	return val;
}
/* XXX remove the handcoded version once we have a sufficiently clever compiler... */
#ifdef SMART_COMPILER
# define ia64_rotr(w,n)				\
  ({						\
	__u64 _w = (w), _n = (n);		\
						\
	(_w >> _n) | (_w << (64 - _n));		\
  })
#else
# define ia64_rotr(w,n)							\
  ({									\
	__u64 result;							\
	asm ("shrp %0=%1,%1,%2" : "=r"(result) : "r"(w), "i"(n));	\
	result;								\
  })
#endif

#define ia64_rotl(w,n)	ia64_rotr((w),(64)-(n))
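
/*
 * Usage example (illustrative only): the rotate count must be a compile-time
 * constant (the inline asm uses an "i" constraint), e.g.
 *
 *	__u64 x = ia64_rotr(0x123456789abcdef0UL, 8);	// 0xf0123456789abcde
 *	__u64 y = ia64_rotl(0x123456789abcdef0UL, 8);	// 0x3456789abcdef012
 */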
static inline __u64
ia64_thash (__u64 addr)
{
	__u64 result;
	asm ("thash %0=%1" : "=r"(result) : "r" (addr));
	return result;
}

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_IA64_PROCESSOR_H */