1 #ifndef _ASM_IA64_PROCESSOR_H
2 #define _ASM_IA64_PROCESSOR_H
/*
 * Copyright (C) 1998-2000 Hewlett-Packard Co
 * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1998, 1999 Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 *
 * 11/24/98	S.Eranian	added ia64_set_iva()
 * 12/03/99	D. Mosberger	implement thread_saved_pc() via kernel unwind API
 * 06/16/00	A. Mallick	added csd/ssd/tssd for ia32 support
 */
16 #include <linux/config.h>
18 #include <asm/ptrace.h>
19 #include <asm/types.h>
/* Number of debug-register pairs and (perfmon) performance-monitor registers saved per thread. */
#define IA64_NUM_DBG_REGS	8
#define IA64_NUM_PM_REGS	4

/*
 * TASK_SIZE really is misnamed.  It really is the maximum user
 * space address (plus one).  On ia-64, there are five regions of 2TB
 * each (assuming 8KB page size), for a total of 8TB of user virtual
 * address space.
 */
#define TASK_SIZE		0xa000000000000000

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(current->thread.map_base)

#define EISA_bus__is_a_macro	/* for versions in ksyms.c */
#define MCA_bus__is_a_macro	/* for versions in ksyms.c */
/* Processor status register (psr) bit positions: */
#define IA64_PSR_BE_BIT		1
#define IA64_PSR_UP_BIT		2
#define IA64_PSR_AC_BIT		3
#define IA64_PSR_MFL_BIT	4
#define IA64_PSR_MFH_BIT	5
#define IA64_PSR_IC_BIT		13
#define IA64_PSR_I_BIT		14
#define IA64_PSR_PK_BIT		15
#define IA64_PSR_DT_BIT		17
#define IA64_PSR_DFL_BIT	18
#define IA64_PSR_DFH_BIT	19
#define IA64_PSR_SP_BIT		20
#define IA64_PSR_PP_BIT		21
#define IA64_PSR_DI_BIT		22
#define IA64_PSR_SI_BIT		23
#define IA64_PSR_DB_BIT		24
#define IA64_PSR_LP_BIT		25
#define IA64_PSR_TB_BIT		26
#define IA64_PSR_RT_BIT		27
/* The following are not affected by save_flags()/restore_flags(): */
#define IA64_PSR_CPL0_BIT	32
#define IA64_PSR_CPL1_BIT	33
#define IA64_PSR_IS_BIT		34
#define IA64_PSR_MC_BIT		35
#define IA64_PSR_IT_BIT		36
#define IA64_PSR_ID_BIT		37
#define IA64_PSR_DA_BIT		38
#define IA64_PSR_DD_BIT		39
#define IA64_PSR_SS_BIT		40
#define IA64_PSR_RI_BIT		41
#define IA64_PSR_ED_BIT		43
#define IA64_PSR_BN_BIT		44

/* Single-bit masks corresponding to the bit positions above: */
#define IA64_PSR_BE	(__IA64_UL(1) << IA64_PSR_BE_BIT)
#define IA64_PSR_UP	(__IA64_UL(1) << IA64_PSR_UP_BIT)
#define IA64_PSR_AC	(__IA64_UL(1) << IA64_PSR_AC_BIT)
#define IA64_PSR_MFL	(__IA64_UL(1) << IA64_PSR_MFL_BIT)
#define IA64_PSR_MFH	(__IA64_UL(1) << IA64_PSR_MFH_BIT)
#define IA64_PSR_IC	(__IA64_UL(1) << IA64_PSR_IC_BIT)
#define IA64_PSR_I	(__IA64_UL(1) << IA64_PSR_I_BIT)
#define IA64_PSR_PK	(__IA64_UL(1) << IA64_PSR_PK_BIT)
#define IA64_PSR_DT	(__IA64_UL(1) << IA64_PSR_DT_BIT)
#define IA64_PSR_DFL	(__IA64_UL(1) << IA64_PSR_DFL_BIT)
#define IA64_PSR_DFH	(__IA64_UL(1) << IA64_PSR_DFH_BIT)
#define IA64_PSR_SP	(__IA64_UL(1) << IA64_PSR_SP_BIT)
#define IA64_PSR_PP	(__IA64_UL(1) << IA64_PSR_PP_BIT)
#define IA64_PSR_DI	(__IA64_UL(1) << IA64_PSR_DI_BIT)
#define IA64_PSR_SI	(__IA64_UL(1) << IA64_PSR_SI_BIT)
#define IA64_PSR_DB	(__IA64_UL(1) << IA64_PSR_DB_BIT)
#define IA64_PSR_LP	(__IA64_UL(1) << IA64_PSR_LP_BIT)
#define IA64_PSR_TB	(__IA64_UL(1) << IA64_PSR_TB_BIT)
#define IA64_PSR_RT	(__IA64_UL(1) << IA64_PSR_RT_BIT)
/* The following are not affected by save_flags()/restore_flags(): */
#define IA64_PSR_IS	(__IA64_UL(1) << IA64_PSR_IS_BIT)
#define IA64_PSR_MC	(__IA64_UL(1) << IA64_PSR_MC_BIT)
#define IA64_PSR_IT	(__IA64_UL(1) << IA64_PSR_IT_BIT)
#define IA64_PSR_ID	(__IA64_UL(1) << IA64_PSR_ID_BIT)
#define IA64_PSR_DA	(__IA64_UL(1) << IA64_PSR_DA_BIT)
#define IA64_PSR_DD	(__IA64_UL(1) << IA64_PSR_DD_BIT)
#define IA64_PSR_SS	(__IA64_UL(1) << IA64_PSR_SS_BIT)
/* psr.ri is a two-bit field, hence the 3 below: */
#define IA64_PSR_RI	(__IA64_UL(3) << IA64_PSR_RI_BIT)
#define IA64_PSR_ED	(__IA64_UL(1) << IA64_PSR_ED_BIT)
#define IA64_PSR_BN	(__IA64_UL(1) << IA64_PSR_BN_BIT)

/* User mask bits: */
#define IA64_PSR_UM	(IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL | IA64_PSR_MFH)
/* Default Control Register */
#define IA64_DCR_PP_BIT		 0	/* privileged performance monitor default */
#define IA64_DCR_BE_BIT		 1	/* big-endian default */
#define IA64_DCR_LC_BIT		 2	/* ia32 lock-check enable */
#define IA64_DCR_DM_BIT		 8	/* defer TLB miss faults */
#define IA64_DCR_DP_BIT		 9	/* defer page-not-present faults */
#define IA64_DCR_DK_BIT		10	/* defer key miss faults */
#define IA64_DCR_DX_BIT		11	/* defer key permission faults */
#define IA64_DCR_DR_BIT		12	/* defer access right faults */
#define IA64_DCR_DA_BIT		13	/* defer access bit faults */
#define IA64_DCR_DD_BIT		14	/* defer debug faults */

/* Single-bit masks corresponding to the bit positions above: */
#define IA64_DCR_PP	(__IA64_UL(1) << IA64_DCR_PP_BIT)
#define IA64_DCR_BE	(__IA64_UL(1) << IA64_DCR_BE_BIT)
#define IA64_DCR_LC	(__IA64_UL(1) << IA64_DCR_LC_BIT)
#define IA64_DCR_DM	(__IA64_UL(1) << IA64_DCR_DM_BIT)
#define IA64_DCR_DP	(__IA64_UL(1) << IA64_DCR_DP_BIT)
#define IA64_DCR_DK	(__IA64_UL(1) << IA64_DCR_DK_BIT)
#define IA64_DCR_DX	(__IA64_UL(1) << IA64_DCR_DX_BIT)
#define IA64_DCR_DR	(__IA64_UL(1) << IA64_DCR_DR_BIT)
#define IA64_DCR_DA	(__IA64_UL(1) << IA64_DCR_DA_BIT)
#define IA64_DCR_DD	(__IA64_UL(1) << IA64_DCR_DD_BIT)
/* Interrupt Status Register */
#define IA64_ISR_X_BIT		32	/* execute access */
#define IA64_ISR_W_BIT		33	/* write access */
#define IA64_ISR_R_BIT		34	/* read access */
#define IA64_ISR_NA_BIT		35	/* non-access */
#define IA64_ISR_SP_BIT		36	/* speculative load exception */
#define IA64_ISR_RS_BIT		37	/* mandatory register-stack exception */
#define IA64_ISR_IR_BIT		38	/* invalid register frame exception */

/* Single-bit masks corresponding to the bit positions above: */
#define IA64_ISR_X	(__IA64_UL(1) << IA64_ISR_X_BIT)
#define IA64_ISR_W	(__IA64_UL(1) << IA64_ISR_W_BIT)
#define IA64_ISR_R	(__IA64_UL(1) << IA64_ISR_R_BIT)
#define IA64_ISR_NA	(__IA64_UL(1) << IA64_ISR_NA_BIT)
#define IA64_ISR_SP	(__IA64_UL(1) << IA64_ISR_SP_BIT)
#define IA64_ISR_RS	(__IA64_UL(1) << IA64_ISR_RS_BIT)
#define IA64_ISR_IR	(__IA64_UL(1) << IA64_ISR_IR_BIT)
/* Bits stored in thread_struct.flags (see SET_UNALIGN_CTL() below): */
#define IA64_THREAD_FPH_VALID	(__IA64_UL(1) << 0)	/* floating-point high state valid? */
#define IA64_THREAD_DBG_VALID	(__IA64_UL(1) << 1)	/* debug registers valid? */
#define IA64_THREAD_PM_VALID	(__IA64_UL(1) << 2)	/* performance registers valid? */
#define IA64_THREAD_UAC_NOPRINT	(__IA64_UL(1) << 3)	/* don't log unaligned accesses */
#define IA64_THREAD_UAC_SIGBUS	(__IA64_UL(1) << 4)	/* generate SIGBUS on unaligned acc. */
#define IA64_THREAD_KRBS_SYNCED	(__IA64_UL(1) << 5)	/* krbs synced with process vm? */
#define IA64_KERNEL_DEATH	(__IA64_UL(1) << 63)	/* see die_if_kernel()... */

/* Shift/mask for extracting the unaligned-access-control bits from flags: */
#define IA64_THREAD_UAC_SHIFT	3
#define IA64_THREAD_UAC_MASK	(IA64_THREAD_UAC_NOPRINT | IA64_THREAD_UAC_SIGBUS)
167 #include <linux/smp.h>
168 #include <linux/threads.h>
171 #include <asm/offsets.h>
172 #include <asm/page.h>
174 #include <asm/unwind.h>
/*
 * NOTE(review): the bitfield line below is a fragment of a larger struct
 * (the "bitfield" view of psr the comment refers to); most of that
 * struct's lines are missing from this chunk -- do not edit blind.
 */
176 /* like above but expressed as bitfields for more efficient access: */
212 __u64 reserved4
: 19;
/*
 * Fixed-point shift used for the cycles->microseconds conversion; see
 * the usec_per_cyc field of struct cpuinfo_ia64 below.
 */
216 * This shift should be large enough to be able to represent
217 * 1000000/itc_freq with good accuracy while being small enough to fit
218 * 1000000<<IA64_USEC_PER_CYC_SHIFT in 64 bits.
220 #define IA64_USEC_PER_CYC_SHIFT 41
223 * CPU type, hardware bug flags, and per-CPU state.
/*
 * NOTE(review): several members of this struct are missing from this chunk
 * (gaps in the original line numbering) -- treat the field list as partial.
 */
225 struct cpuinfo_ia64
{
229 __u64 pgtable_cache_sz
;
230 /* CPUID-derived information: */
239 __u64 itc_freq
; /* frequency of ITC counter */
240 __u64 proc_freq
; /* frequency of processor */
241 __u64 cyc_per_usec
; /* itc_freq/1000000 */
242 __u64 usec_per_cyc
; /* 2^IA64_USEC_PER_CYC_SHIFT*1000000/itc_freq */
243 __u64 unimpl_va_mask
; /* mask of unimplemented virtual address bits (from PAL) */
244 __u64 unimpl_pa_mask
; /* mask of unimplemented physical address bits (from PAL) */
249 __u64 prof_multiplier
;
/*
 * my_cpu_data: this CPU's entry in the cpu_data[] array.  The two
 * ia64_loops_per_sec() variants below are presumably the SMP and UP
 * cases -- the separating #ifdef/#else lines are missing from this chunk.
 */
253 #define my_cpu_data cpu_data[smp_processor_id()]
256 # define ia64_loops_per_sec() my_cpu_data.loops_per_sec
258 # define ia64_loops_per_sec() loops_per_sec
261 extern struct cpuinfo_ia64 cpu_data
[NR_CPUS
];
/* CPU identification / banner printing; defined in arch code. */
263 extern void identify_cpu (struct cpuinfo_ia64
*);
264 extern void print_cpu_info (struct cpuinfo_ia64
*);
/*
 * Store/extract the unaligned-access-control bits (IA64_THREAD_UAC_*) in
 * task->thread.flags; presumably the prctl(PR_SET/GET_UNALIGN) backends --
 * confirm against kernel/sys.c.  NOTE(review): the macros' surrounding
 * "({ ... })" lines are missing from this chunk.
 */
270 #define SET_UNALIGN_CTL(task,value) \
272 (task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_UAC_MASK) \
273 | (((value) << IA64_THREAD_UAC_SHIFT) & IA64_THREAD_UAC_MASK)); \
276 #define GET_UNALIGN_CTL(task,addr) \
278 put_user(((task)->thread.flags & IA64_THREAD_UAC_MASK) >> IA64_THREAD_UAC_SHIFT, \
/*
 * Architecture-specific per-thread state (embedded in the task_struct).
 * NOTE(review): several lines -- including the #else/#endif pairs of the
 * CONFIG_PERFMON and CONFIG_IA32_SUPPORT regions -- are missing from this
 * chunk; verify against the full file before editing.
 */
284 struct thread_struct
{
285 __u64 ksp
; /* kernel stack pointer */
286 unsigned long flags
; /* various flags */
287 struct ia64_fpreg fph
[96]; /* saved/loaded on demand */
288 __u64 dbr
[IA64_NUM_DBG_REGS
];
289 __u64 ibr
[IA64_NUM_DBG_REGS
];
290 #ifdef CONFIG_PERFMON
291 __u64 pmc
[IA64_NUM_PM_REGS
];
292 __u64 pmd
[IA64_NUM_PM_REGS
];
293 __u64 pmod
[IA64_NUM_PM_REGS
];
/*
 * Initializer fragments for the pmc/pmd/pmod arrays; the two variants are
 * the CONFIG_PERFMON and non-PERFMON cases (the separating #else line is
 * missing from this chunk).
 */
294 # define INIT_THREAD_PM {0, }, {0, }, {0, },
296 # define INIT_THREAD_PM
298 __u64 map_base
; /* base address for mmap() */
299 #ifdef CONFIG_IA32_SUPPORT
300 __u64 eflag
; /* IA32 EFLAGS reg */
301 __u64 fsr
; /* IA32 floating pt status reg */
302 __u64 fcr
; /* IA32 floating pt control reg */
303 __u64 fir
; /* IA32 fp except. instr. reg */
304 __u64 fdr
; /* IA32 fp except. data reg */
305 __u64 csd
; /* IA32 code selector descriptor */
306 __u64 ssd
; /* IA32 stack selector descriptor */
307 __u64 tssd
; /* IA32 TSS descriptor */
308 __u64 old_iob
; /* old IOBase value */
310 __u64 sigmask
; /* aligned mask for sigsuspend scall */
/* IA32 initializer fragment; the non-IA32 variant follows (missing #else). */
312 # define INIT_THREAD_IA32 , 0, 0, 0x17800000037fULL, 0, 0, 0, 0, 0, 0, {0}
314 # define INIT_THREAD_IA32
315 #endif /* CONFIG_IA32_SUPPORT */
316 struct siginfo
*siginfo
; /* current siginfo struct for ptrace() */
/*
 * Initializers for the init task's vm_area_struct and thread_struct.
 * NOTE(review): continuation lines of both macros are missing from this
 * chunk -- the visible entries are only part of each initializer.
 */
319 #define INIT_MMAP { \
320 &init_mm, PAGE_OFFSET, PAGE_OFFSET + 0x10000000, NULL, PAGE_SHARED, \
321 VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL \
324 #define INIT_THREAD { \
327 {{{{0}}}, }, /* fph */ \
331 0x2000000000000000 /* map_base */ \
/*
 * start_thread(): set up the user-mode register state for exec(): run at
 * new_ip in user mode (cpl=3, IA-64 instruction set), fph disabled, a
 * 16-byte scratch area below new_sp, and the register backing store at
 * IA64_RBS_BOT.  NOTE(review): trailing lines of this macro are missing
 * from this chunk.
 */
336 #define start_thread(regs,new_ip,new_sp) do { \
338 ia64_psr(regs)->dfh = 1; /* disable fph */ \
339 ia64_psr(regs)->mfh = 0; /* clear mfh */ \
340 ia64_psr(regs)->cpl = 3; /* set user mode */ \
341 ia64_psr(regs)->ri = 0; /* clear return slot number */ \
342 ia64_psr(regs)->is = 0; /* IA-64 instruction set */ \
343 regs->cr_iip = new_ip; \
344 regs->ar_rsc = 0xf; /* eager mode, privilege level 3 */ \
345 regs->r12 = new_sp - 16; /* allocate 16 byte scratch area */ \
346 regs->ar_bspstore = IA64_RBS_BOT; \
351 /* Forward declarations, a strange C thing... */
356 * Free all resources held by a thread. This is called after the
357 * parent of DEAD_TASK has collected the exit status of the task via
358 * wait(). This is a no-op on IA-64.
360 #define release_thread(dead_task)
363 * This is the mechanism for creating a new kernel thread.
365 * NOTE 1: Only a kernel-only process (ie the swapper or direct
366 * descendants who haven't done an "execve()") should use this: it
367 * will work within a system call from a "real" process, but the
368 * process memory space will not be free'd until both the parent and
369 * the child have exited.
371 * NOTE 2: This MUST NOT be an inlined function. Otherwise, we get
372 * into trouble in init/main.c when the child thread returns to
373 * do_basic_setup() and the timing is such that free_initmem() has
374 * been called already.
376 extern int kernel_thread (int (*fn
)(void *), void *arg
, unsigned long flags
);
378 /* Get wait channel for task P. */
379 extern unsigned long get_wchan (struct task_struct
*p
);
381 /* Return instruction pointer of blocked task TSK. */
/*
 * NOTE(review): KSTK_EIP's enclosing "({ ... })" statement-expression
 * lines are missing from this chunk.  The value is the task's saved iip
 * plus the psr.ri instruction-slot number.
 */
382 #define KSTK_EIP(tsk) \
384 struct pt_regs *_regs = ia64_task_regs(tsk); \
385 _regs->cr_iip + ia64_psr(_regs)->ri; \
388 /* Return stack pointer of blocked task TSK. */
389 #define KSTK_ESP(tsk) ((tsk)->thread.ksp)
/*
 * The task currently owning the fph partition is cached in kernel
 * register ar.k5 (read by ia64_get_fpu_owner(), written by
 * ia64_set_fpu_owner()).  NOTE(review): the function bodies below are
 * missing lines (braces/return statements) in this chunk.
 */
393 static inline struct task_struct
*
394 ia64_get_fpu_owner (void)
396 struct task_struct
*t
;
397 __asm__ ("mov %0=ar.k5" : "=r"(t
));
402 ia64_set_fpu_owner (struct task_struct
*t
)
404 __asm__
__volatile__ ("mov ar.k5=%0" :: "r"(t
));
407 #endif /* !CONFIG_SMP */
/* Assembly helpers for the high floating-point partition and debug registers. */
409 extern void __ia64_init_fpu (void);
410 extern void __ia64_save_fpu (struct ia64_fpreg
*fph
);
411 extern void __ia64_load_fpu (struct ia64_fpreg
*fph
);
412 extern void ia64_save_debug_regs (unsigned long *save_area
);
413 extern void ia64_load_debug_regs (unsigned long *save_area
);
/* NOTE(review): the matching #endif lines for the two #ifdef regions below
   are missing from this chunk. */
415 #ifdef CONFIG_IA32_SUPPORT
416 extern void ia32_save_state (struct thread_struct
*thread
);
417 extern void ia32_load_state (struct thread_struct
*thread
);
420 #ifdef CONFIG_PERFMON
421 extern void ia64_save_pm_regs (struct thread_struct
*thread
);
422 extern void ia64_load_pm_regs (struct thread_struct
*thread
);
/*
 * Enable/disable access to the high FP partition (f32-f127) by
 * clearing/setting psr.dfh; the srlz.d makes the change take effect.
 */
425 #define ia64_fph_enable() __asm__ __volatile__ (";; rsm psr.dfh;; srlz.d;;" ::: "memory");
426 #define ia64_fph_disable() __asm__ __volatile__ (";; ssm psr.dfh;; srlz.d;;" ::: "memory");
/*
 * Inline wrappers around the __ia64_*_fpu() assembly helpers.
 * NOTE(review): the bodies are missing lines in this chunk -- presumably
 * they bracket the calls with ia64_fph_enable()/ia64_fph_disable();
 * confirm against the full file.
 */
428 /* load fp 0.0 into fph */
430 ia64_init_fpu (void) {
436 /* save f32-f127 at FPH */
438 ia64_save_fpu (struct ia64_fpreg
*fph
) {
440 __ia64_save_fpu(fph
);
444 /* load f32-f127 from FPH */
446 ia64_load_fpu (struct ia64_fpreg
*fph
) {
448 __ia64_load_fpu(fph
);
/*
 * Low-level cache/serialization primitives and control-register accessors
 * (flush-cache, sync.i, srlz.i/srlz.d, region registers, cr.dcr, cr.lid,
 * invala).  NOTE(review): the enclosing function headers ("static inline
 * ..." lines) and local declarations are partially missing in this chunk.
 */
455 __asm__
__volatile__ ("fc %0" :: "r"(addr
) : "memory");
461 __asm__
__volatile__ (";; sync.i" ::: "memory");
467 __asm__
__volatile__ (";; srlz.i ;;" ::: "memory");
473 __asm__
__volatile__ (";; srlz.d" ::: "memory");
477 ia64_get_rr (__u64 reg_bits
)
480 __asm__
__volatile__ ("mov %0=rr[%1]" : "=r"(r
) : "r"(reg_bits
) : "memory");
485 ia64_set_rr (__u64 reg_bits
, __u64 rr_val
)
487 __asm__
__volatile__ ("mov rr[%0]=%1" :: "r"(reg_bits
), "r"(rr_val
) : "memory");
494 __asm__ ("mov %0=cr.dcr" : "=r"(r
));
499 ia64_set_dcr (__u64 val
)
501 __asm__
__volatile__ ("mov cr.dcr=%0;;" :: "r"(val
) : "memory");
509 __asm__ ("mov %0=cr.lid" : "=r"(r
));
516 __asm__
__volatile__ ("invala" ::: "memory");
520 * Save the processor status flags in FLAGS and then clear the
521 * interrupt collection and interrupt enable bits.
523 #define ia64_clear_ic(flags) \
524 __asm__ __volatile__ ("mov %0=psr;; rsm psr.i | psr.ic;; srlz.i;;" \
525 : "=r"(flags) :: "memory");
/*
 * TLB insertion/purge primitives.  In all three routines target_mask
 * bit 0 selects the instruction TLB and bit 1 the data TLB;
 * log_page_size is shifted left by 2 to land in cr.itir's page-size
 * field.  NOTE(review): the function headers and some parameter lines
 * are missing from this chunk.
 */
528 * Insert a translation into an instruction and/or data translation
532 ia64_itr (__u64 target_mask
, __u64 tr_num
,
533 __u64 vmaddr
, __u64 pte
,
536 __asm__
__volatile__ ("mov cr.itir=%0" :: "r"(log_page_size
<< 2) : "memory");
537 __asm__
__volatile__ ("mov cr.ifa=%0;;" :: "r"(vmaddr
) : "memory");
538 if (target_mask
& 0x1)
539 __asm__
__volatile__ ("itr.i itr[%0]=%1"
540 :: "r"(tr_num
), "r"(pte
) : "memory");
541 if (target_mask
& 0x2)
542 __asm__
__volatile__ (";;itr.d dtr[%0]=%1"
543 :: "r"(tr_num
), "r"(pte
) : "memory");
/* Translation-cache (itc) insertion -- like ia64_itr() but without a TR slot. */
547 * Insert a translation into the instruction and/or data translation
551 ia64_itc (__u64 target_mask
, __u64 vmaddr
, __u64 pte
,
554 __asm__
__volatile__ ("mov cr.itir=%0" :: "r"(log_page_size
<< 2) : "memory");
555 __asm__
__volatile__ ("mov cr.ifa=%0;;" :: "r"(vmaddr
) : "memory");
556 /* as per EAS2.6, itc must be the last instruction in an instruction group */
557 if (target_mask
& 0x1)
558 __asm__
__volatile__ ("itc.i %0;;" :: "r"(pte
) : "memory");
559 if (target_mask
& 0x2)
560 __asm__
__volatile__ (";;itc.d %0;;" :: "r"(pte
) : "memory");
/* Purge (ptr.i/ptr.d) a translation; log_size<<2 forms the size operand. */
564 * Purge a range of addresses from instruction and/or data translation
568 ia64_ptr (__u64 target_mask
, __u64 vmaddr
, __u64 log_size
)
570 if (target_mask
& 0x1)
571 __asm__
__volatile__ ("ptr.i %0,%1" :: "r"(vmaddr
), "r"(log_size
<< 2));
572 if (target_mask
& 0x2)
573 __asm__
__volatile__ ("ptr.d %0,%1" :: "r"(vmaddr
), "r"(log_size
<< 2));
576 /* Set the interrupt vector address. The address must be suitably aligned (32KB). */
578 ia64_set_iva (void *ivt_addr
)
580 __asm__
__volatile__ ("mov cr.iva=%0;; srlz.i;;" :: "r"(ivt_addr
) : "memory");
583 /* Set the page table address and control bits. */
585 ia64_set_pta (__u64 pta
)
587 /* Note: srlz.i implies srlz.d */
588 __asm__
__volatile__ ("mov cr.pta=%0;; srlz.i;;" :: "r"(pta
) : "memory");
/*
 * Read CPUID register `regnum'.  The "rO" constraint allows regnum to be
 * either a general register or the constant zero (r0).  NOTE(review): the
 * local `r' declaration and return are missing from this chunk.
 */
592 ia64_get_cpuid (__u64 regnum
)
596 __asm__ ("mov %0=cpuid[%r1]" : "=r"(r
) : "rO"(regnum
));
/* Signal end-of-interrupt by writing r0 (zero) to cr.eoi. */
603 __asm__ ("mov cr.eoi=r0;; srlz.d;;" ::: "memory");
/*
 * Program local redirection registers lrr0/lrr1: the low byte is the
 * interrupt vector and bit 16 is the mask bit ((masked << 16) | vector).
 */
607 ia64_set_lrr0 (__u8 vector
, __u8 masked
)
612 __asm__
__volatile__ ("mov cr.lrr0=%0;; srlz.d"
613 :: "r"((masked
<< 16) | vector
) : "memory");
618 ia64_set_lrr1 (__u8 vector
, __u8 masked
)
623 __asm__
__volatile__ ("mov cr.lrr1=%0;; srlz.d"
624 :: "r"((masked
<< 16) | vector
) : "memory");
/* Performance-monitor vector and pmc/pmd register-file accessors.
   NOTE(review): locals (`retval') and returns are missing from this chunk. */
628 ia64_set_pmv (__u64 val
)
630 __asm__
__volatile__ ("mov cr.pmv=%0" :: "r"(val
) : "memory");
634 ia64_get_pmc (__u64 regnum
)
638 __asm__
__volatile__ ("mov %0=pmc[%1]" : "=r"(retval
) : "r"(regnum
));
643 ia64_set_pmc (__u64 regnum
, __u64 value
)
645 __asm__
__volatile__ ("mov pmc[%0]=%1" :: "r"(regnum
), "r"(value
));
649 ia64_get_pmd (__u64 regnum
)
653 __asm__
__volatile__ ("mov %0=pmd[%1]" : "=r"(retval
) : "r"(regnum
));
658 ia64_set_pmd (__u64 regnum
, __u64 value
)
660 __asm__
__volatile__ ("mov pmd[%0]=%1" :: "r"(regnum
), "r"(value
));
664 * Given the address to which a spill occurred, return the unat bit
665 * number that corresponds to this address.
/* Bits 3..8 of the spill address select one of the 64 unat bits
   (one bit per 8-byte slot within a 512-byte-aligned window). */
668 ia64_unat_pos (void *spill_addr
)
670 return ((__u64
) spill_addr
>> 3) & 0x3f;
674 * Set the NaT bit of an integer register which was spilled at address
675 * SPILL_ADDR. UNAT is the mask to be updated.
/* nat is expected to be 0 or 1; the old bit is cleared, then nat is
   shifted into place. */
678 ia64_set_unat (__u64
*unat
, void *spill_addr
, unsigned long nat
)
680 __u64 bit
= ia64_unat_pos(spill_addr
);
681 __u64 mask
= 1UL << bit
;
683 *unat
= (*unat
& ~mask
) | (nat
<< bit
);
687 * Return saved PC of a blocked thread.
688 * Note that the only way T can block is through a call to schedule() -> switch_to().
690 static inline unsigned long
691 thread_saved_pc (struct thread_struct
*t
)
693 struct unw_frame_info info
;
696 /* XXX ouch: Linus, please pass the task pointer to thread_saved_pc() instead! */
/* Recover the task_struct pointer by subtracting the thread_struct's
   offset within task_struct. */
697 struct task_struct
*p
= (void *) ((unsigned long) t
- IA64_TASK_THREAD_OFFSET
);
/* Unwind one frame past switch_to() to find the caller's IP.
   NOTE(review): the error-path statement after the unw_unwind() check is
   missing from this chunk. */
699 unw_init_from_blocked_task(&info
, p
);
700 if (unw_unwind(&info
) < 0)
702 unw_get_ip(&info
, &ip
);
707 * Get the current instruction/program counter value.
709 #define current_text_addr() \
710 ({ void *_pc; __asm__ ("mov %0=ip" : "=r" (_pc)); _pc; })
712 #define THREAD_SIZE IA64_STK_OFFSET
713 /* NOTE: The task struct and the stacks are allocated together. */
714 #define alloc_task_struct() \
715 ((struct task_struct *) __get_free_pages(GFP_KERNEL, IA64_TASK_STRUCT_LOG_NUM_PAGES))
716 #define free_task_struct(p) free_pages((unsigned long)(p), IA64_TASK_STRUCT_LOG_NUM_PAGES)
/* Take a reference on a task by bumping its page's use count. */
717 #define get_task_struct(tsk) atomic_inc(&virt_to_page(tsk)->count)
719 #define init_task (init_task_union.task)
720 #define init_stack (init_task_union.stack)
/*
 * Control-register accessors: cmcv (correctable machine check vector),
 * ivr (interrupt vector), tpr (task priority), irr0-3 (interrupt request
 * registers), and the global pointer.  NOTE(review): the getters' function
 * headers, local declarations and returns are missing from this chunk.
 */
723 * Set the correctable machine check vector register
726 ia64_set_cmcv (__u64 val
)
728 __asm__
__volatile__ ("mov cr.cmcv=%0" :: "r"(val
) : "memory");
732 * Read the correctable machine check vector register
739 __asm__ ("mov %0=cr.cmcv" : "=r"(val
) :: "memory");
746 __asm__
__volatile__ ("srlz.d;; mov %0=cr.ivr;; srlz.d;;" : "=r"(r
));
752 ia64_set_tpr (__u64 val
)
754 __asm__
__volatile__ ("mov cr.tpr=%0" :: "r"(val
));
761 __asm__ ("mov %0=cr.tpr" : "=r"(r
));
766 ia64_set_irr0 (__u64 val
)
768 __asm__
__volatile__("mov cr.irr0=%0;;" :: "r"(val
) : "memory");
777 /* this is volatile because irr may change unbeknownst to gcc... */
778 __asm__
__volatile__("mov %0=cr.irr0" : "=r"(val
));
783 ia64_set_irr1 (__u64 val
)
785 __asm__
__volatile__("mov cr.irr1=%0;;" :: "r"(val
) : "memory");
794 /* this is volatile because irr may change unbeknownst to gcc... */
795 __asm__
__volatile__("mov %0=cr.irr1" : "=r"(val
));
800 ia64_set_irr2 (__u64 val
)
802 __asm__
__volatile__("mov cr.irr2=%0;;" :: "r"(val
) : "memory");
811 /* this is volatile because irr may change unbeknownst to gcc... */
812 __asm__
__volatile__("mov %0=cr.irr2" : "=r"(val
));
817 ia64_set_irr3 (__u64 val
)
819 __asm__
__volatile__("mov cr.irr3=%0;;" :: "r"(val
) : "memory");
828 /* this is volatile because irr may change unbeknownst to gcc... */
829 __asm__
__volatile__("mov %0=cr.irr3" : "=r"(val
));
/* Read the global pointer (gp) register. */
838 __asm__ ("mov %0=gp" : "=r"(val
));
/*
 * 64-bit rotate-right.  The handcoded variant uses the shrp (shift-right-
 * pair) instruction; its "i" constraint requires n to be a compile-time
 * constant.  NOTE(review): the "({"/"})" and #else/#endif lines of these
 * macros are missing from this chunk.
 */
842 /* XXX remove the handcoded version once we have a sufficiently clever compiler... */
843 #ifdef SMART_COMPILER
844 # define ia64_rotr(w,n) \
846 __u64 _w = (w), _n = (n); \
848 (_w >> _n) | (_w << (64 - _n)); \
851 # define ia64_rotr(w,n) \
854 asm ("shrp %0=%1,%1,%2" : "=r"(result) : "r"(w), "i"(n)); \
/* Rotate left = rotate right by the complementary amount. */
859 #define ia64_rotl(w,n) ia64_rotr((w),(64)-(n))
/* thash: hash a virtual address into its VHPT entry address.
   NOTE(review): header/local/return lines are missing from this chunk. */
862 ia64_thash (__u64 addr
)
865 asm ("thash %0=%1" : "=r"(result
) : "r" (addr
));
869 #endif /* !__ASSEMBLY__ */
871 #endif /* _ASM_IA64_PROCESSOR_H */