[IA64] hooks to wait for mmio writes to drain when migrating processes
[linux-2.6/x86.git] include/asm-ia64/processor.h
#ifndef _ASM_IA64_PROCESSOR_H
#define _ASM_IA64_PROCESSOR_H

/*
 * Copyright (C) 1998-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 *
 * 11/24/98	S.Eranian	added ia64_set_iva()
 * 12/03/99	D. Mosberger	implement thread_saved_pc() via kernel unwind API
 * 06/16/00	A. Mallick	added csd/ssd/tssd for ia32 support
 */
#include <linux/config.h>

#include <asm/intrinsics.h>
#include <asm/kregs.h>
#include <asm/ptrace.h>
#include <asm/ustack.h>
#define IA64_NUM_DBG_REGS	8
/*
 * Limits for PMC and PMD are set to less than maximum architected values
 * but should be sufficient for a while
 */
#define IA64_NUM_PMC_REGS	64
#define IA64_NUM_PMD_REGS	64

#define DEFAULT_MAP_BASE	__IA64_UL_CONST(0x2000000000000000)
#define DEFAULT_TASK_SIZE	__IA64_UL_CONST(0xa000000000000000)
/*
 * TASK_SIZE really is a misnomer.  It really is the maximum user
 * space address (plus one).  On IA-64, there are five regions of 2TB
 * each (assuming 8KB page size), for a total of 8TB of user virtual
 * address space.
 */
#define TASK_SIZE		(current->thread.task_size)

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(current->thread.map_base)
#define IA64_THREAD_FPH_VALID	(__IA64_UL(1) << 0)	/* floating-point high state valid? */
#define IA64_THREAD_DBG_VALID	(__IA64_UL(1) << 1)	/* debug registers valid? */
#define IA64_THREAD_PM_VALID	(__IA64_UL(1) << 2)	/* performance registers valid? */
#define IA64_THREAD_UAC_NOPRINT	(__IA64_UL(1) << 3)	/* don't log unaligned accesses */
#define IA64_THREAD_UAC_SIGBUS	(__IA64_UL(1) << 4)	/* generate SIGBUS on unaligned acc. */
#define IA64_THREAD_MIGRATION	(__IA64_UL(1) << 5)	/* require migration
							   sync at ctx sw */
#define IA64_THREAD_FPEMU_NOPRINT (__IA64_UL(1) << 6)	/* don't log any fpswa faults */
#define IA64_THREAD_FPEMU_SIGFPE  (__IA64_UL(1) << 7)	/* send a SIGFPE for fpswa faults */

#define IA64_THREAD_UAC_SHIFT	3
#define IA64_THREAD_UAC_MASK	(IA64_THREAD_UAC_NOPRINT | IA64_THREAD_UAC_SIGBUS)
#define IA64_THREAD_FPEMU_SHIFT	6
#define IA64_THREAD_FPEMU_MASK	(IA64_THREAD_FPEMU_NOPRINT | IA64_THREAD_FPEMU_SIGFPE)
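/*
 * A minimal sketch of how IA64_THREAD_MIGRATION is meant to be consumed,
 * per this patch's purpose (waiting for MMIO writes to drain when a process
 * migrates between CPUs).  The real call site lives in the context-switch
 * path, not in this header; the hook name (platform_migrate, a machine
 * vector entry) is shown here only as an illustration:
 *
 *	if (unlikely((current->thread.flags & IA64_THREAD_MIGRATION) &&
 *		     task_cpu(current) != prev_cpu))
 *		platform_migrate(current);	// e.g. SN2 drains outstanding
 *						// MMIO writes here
 */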
/*
 * This shift should be large enough to be able to represent 1000000000/itc_freq with good
 * accuracy while being small enough to fit 10*1000000000<<IA64_NSEC_PER_CYC_SHIFT in 64 bits
 * (this will give enough slack to represent 10 seconds worth of time as a scaled number).
 */
#define IA64_NSEC_PER_CYC_SHIFT	30
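/*
 * Worked example of the scaled arithmetic this shift enables (a sketch,
 * not code from this file): with itc_freq = 400000000 (400 MHz),
 * nsec_per_cyc = (1000000000 << 30) / 400000000 == 2684354560, and a
 * cycle count converts to nanoseconds without a division:
 *
 *	nsec = (cycles * local_cpu_data->nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT;
 *
 * Ten seconds worth of cycles times the scaled nsec_per_cyc must still fit
 * in the 64-bit intermediate product, which is what bounds the shift.
 */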
#ifndef __ASSEMBLY__

#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/threads.h>
#include <linux/types.h>

#include <asm/fpu.h>
#include <asm/page.h>
#include <asm/percpu.h>
#include <asm/rse.h>
#include <asm/unwind.h>
#include <asm/atomic.h>
#ifdef CONFIG_NUMA
#include <asm/nodedata.h>
#endif
/* like the IA64_PSR_* masks in <asm/kregs.h>, but expressed as bitfields for more efficient access: */
struct ia64_psr {
	__u64 reserved0 : 1;
	__u64 be : 1;
	__u64 up : 1;
	__u64 ac : 1;
	__u64 mfl : 1;
	__u64 mfh : 1;
	__u64 reserved1 : 7;
	__u64 ic : 1;
	__u64 i : 1;
	__u64 pk : 1;
	__u64 reserved2 : 1;
	__u64 dt : 1;
	__u64 dfl : 1;
	__u64 dfh : 1;
	__u64 sp : 1;
	__u64 pp : 1;
	__u64 di : 1;
	__u64 si : 1;
	__u64 db : 1;
	__u64 lp : 1;
	__u64 tb : 1;
	__u64 rt : 1;
	__u64 reserved3 : 4;
	__u64 cpl : 2;
	__u64 is : 1;
	__u64 mc : 1;
	__u64 it : 1;
	__u64 id : 1;
	__u64 da : 1;
	__u64 dd : 1;
	__u64 ss : 1;
	__u64 ri : 2;
	__u64 ed : 1;
	__u64 bn : 1;
	__u64 reserved4 : 19;
};
/*
 * CPU type, hardware bug flags, and per-CPU state.  Frequently used
 * state comes earlier:
 */
struct cpuinfo_ia64 {
	__u32 softirq_pending;
	__u64 itm_delta;	/* # of clock cycles between clock ticks */
	__u64 itm_next;		/* interval timer match value to use for next clock tick */
	__u64 nsec_per_cyc;	/* (1000000000<<IA64_NSEC_PER_CYC_SHIFT)/itc_freq */
	__u64 unimpl_va_mask;	/* mask of unimplemented virtual address bits (from PAL) */
	__u64 unimpl_pa_mask;	/* mask of unimplemented physical address bits (from PAL) */
	__u64 itc_freq;		/* frequency of ITC counter */
	__u64 proc_freq;	/* frequency of processor */
	__u64 cyc_per_usec;	/* itc_freq/1000000 */
	__u64 ptce_base;
	__u32 ptce_count[2];
	__u32 ptce_stride[2];
	struct task_struct *ksoftirqd;	/* kernel softirq daemon for this CPU */

#ifdef CONFIG_SMP
	__u64 loops_per_jiffy;
	int cpu;
	__u32 socket_id;	/* physical processor socket id */
	__u16 core_id;		/* core id */
	__u16 thread_id;	/* thread id */
	__u16 num_log;		/* Total number of logical processors on
				 * this socket that were successfully booted */
	__u8 cores_per_socket;	/* Cores per processor socket */
	__u8 threads_per_core;	/* Threads per core */
#endif

	/* CPUID-derived information: */
	__u64 ppn;
	__u64 features;
	__u8 number;
	__u8 revision;
	__u8 model;
	__u8 family;
	__u8 archrev;
	char vendor[16];

#ifdef CONFIG_NUMA
	struct ia64_node_data *node_data;
#endif
};
DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);

/*
 * The "local" data variable.  It refers to the per-CPU data of the currently executing
 * CPU, much like "current" points to the per-task data of the currently executing task.
 * Do not use the address of local_cpu_data, since it will be different from
 * cpu_data(smp_processor_id())!
 */
#define local_cpu_data		(&__ia64_per_cpu_var(cpu_info))
#define cpu_data(cpu)		(&per_cpu(cpu_info, cpu))
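/*
 * Illustrative use: read this CPU's ITC frequency.  Since local_cpu_data is
 * per-CPU state, the caller must not migrate mid-access:
 *
 *	preempt_disable();
 *	itc_freq = local_cpu_data->itc_freq;
 *	preempt_enable();
 *
 * whereas cpu_data(n)->itc_freq reads the same field for an arbitrary CPU n.
 */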
extern void identify_cpu (struct cpuinfo_ia64 *);
extern void print_cpu_info (struct cpuinfo_ia64 *);

typedef struct {
	unsigned long seg;
} mm_segment_t;
#define SET_UNALIGN_CTL(task,value)								\
({												\
	(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_UAC_MASK)			\
				| (((value) << IA64_THREAD_UAC_SHIFT) & IA64_THREAD_UAC_MASK));	\
	0;											\
})
#define GET_UNALIGN_CTL(task,addr)								\
({												\
	put_user(((task)->thread.flags & IA64_THREAD_UAC_MASK) >> IA64_THREAD_UAC_SHIFT,	\
		 (int __user *) (addr));							\
})

#define SET_FPEMU_CTL(task,value)								\
({												\
	(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_FPEMU_MASK)		\
				| (((value) << IA64_THREAD_FPEMU_SHIFT) & IA64_THREAD_FPEMU_MASK));	\
	0;											\
})
#define GET_FPEMU_CTL(task,addr)								\
({												\
	put_user(((task)->thread.flags & IA64_THREAD_FPEMU_MASK) >> IA64_THREAD_FPEMU_SHIFT,	\
		 (int __user *) (addr));							\
})
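/*
 * These four macros back the generic prctl() interface.  An illustrative
 * userspace use (with the standard PR_* constants from <linux/prctl.h>):
 *
 *	prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS);   // sets IA64_THREAD_UAC_SIGBUS
 *	prctl(PR_SET_FPEMU, PR_FPEMU_SIGFPE);       // sets IA64_THREAD_FPEMU_SIGFPE
 */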
#ifdef CONFIG_IA32_SUPPORT
struct desc_struct {
	unsigned int a, b;
};

#define desc_empty(desc)		(!((desc)->a + (desc)->b))
#define desc_equal(desc1, desc2)	(((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))

#define GDT_ENTRY_TLS_ENTRIES	3
#define GDT_ENTRY_TLS_MIN	6
#define GDT_ENTRY_TLS_MAX	(GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)

#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)

struct partial_page_list;
#endif
struct thread_struct {
	__u32 flags;			/* various thread flags (see IA64_THREAD_*) */
	/* writing on_ustack is performance-critical, so it's worth spending 8 bits on it... */
	__u8 on_ustack;			/* executing on user-stacks? */
	__u8 pad[3];
	__u64 ksp;			/* kernel stack pointer */
	__u64 map_base;			/* base address for get_unmapped_area() */
	__u64 task_size;		/* limit for task size */
	__u64 rbs_bot;			/* the base address for the RBS */
	int last_fph_cpu;		/* CPU that may hold the contents of f32-f127 */

#ifdef CONFIG_IA32_SUPPORT
	__u64 eflag;			/* IA32 EFLAGS reg */
	__u64 fsr;			/* IA32 floating pt status reg */
	__u64 fcr;			/* IA32 floating pt control reg */
	__u64 fir;			/* IA32 fp except. instr. reg */
	__u64 fdr;			/* IA32 fp except. data reg */
	__u64 old_k1;			/* old value of ar.k1 */
	__u64 old_iob;			/* old IOBase value */
	struct partial_page_list *ppl;	/* partial page list for 4K page size issue */
	/* cached TLS descriptors. */
	struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];

# define INIT_THREAD_IA32	.eflag =	0,			\
				.fsr =		0,			\
				.fcr =		0x17800000037fULL,	\
				.fir =		0,			\
				.fdr =		0,			\
				.old_k1 =	0,			\
				.old_iob =	0,			\
				.ppl =		NULL,
#else
# define INIT_THREAD_IA32
#endif /* CONFIG_IA32_SUPPORT */
#ifdef CONFIG_PERFMON
	__u64 pmcs[IA64_NUM_PMC_REGS];
	__u64 pmds[IA64_NUM_PMD_REGS];
	void *pfm_context;		     /* pointer to detailed PMU context */
	unsigned long pfm_needs_checking;    /* when >0, pending perfmon work on kernel exit */
# define INIT_THREAD_PM		.pmcs =			{0UL, },	\
				.pmds =			{0UL, },	\
				.pfm_context =		NULL,		\
				.pfm_needs_checking =	0UL,
#else
# define INIT_THREAD_PM
#endif
	__u64 dbr[IA64_NUM_DBG_REGS];
	__u64 ibr[IA64_NUM_DBG_REGS];
	struct ia64_fpreg fph[96];	/* saved/loaded on demand */
};
#define INIT_THREAD {						\
	.flags =	0,					\
	.on_ustack =	0,					\
	.ksp =		0,					\
	.map_base =	DEFAULT_MAP_BASE,			\
	.rbs_bot =	STACK_TOP - DEFAULT_USER_STACK_SIZE,	\
	.task_size =	DEFAULT_TASK_SIZE,			\
	.last_fph_cpu =	-1,					\
	INIT_THREAD_IA32					\
	INIT_THREAD_PM						\
	.dbr =		{0, },					\
	.ibr =		{0, },					\
	.fph =		{{{{0}}}, }				\
}
#define start_thread(regs,new_ip,new_sp) do {							\
	set_fs(USER_DS);									\
	regs->cr_ipsr = ((regs->cr_ipsr | (IA64_PSR_BITS_TO_SET | IA64_PSR_CPL))		\
			 & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS));		\
	regs->cr_iip = new_ip;									\
	regs->ar_rsc = 0xf;		/* eager mode, privilege level 3 */			\
	regs->ar_rnat = 0;									\
	regs->ar_bspstore = current->thread.rbs_bot;						\
	regs->ar_fpsr = FPSR_DEFAULT;								\
	regs->loadrs = 0;									\
	regs->r8 = current->mm->dumpable;	/* set "don't zap registers" flag */		\
	regs->r12 = new_sp - 16;	/* allocate 16 byte scratch area */			\
	if (unlikely(!current->mm->dumpable)) {							\
		/*										\
		 * Zap scratch regs to avoid leaking bits between processes with different	\
		 * uid/privileges.								\
		 */										\
		regs->ar_pfs = 0; regs->b0 = 0; regs->pr = 0;					\
		regs->r1 = 0; regs->r9 = 0; regs->r11 = 0; regs->r13 = 0; regs->r15 = 0;	\
	}											\
} while (0)
/* Forward declarations, a strange C thing... */
struct mm_struct;
struct task_struct;

/*
 * Free all resources held by a thread. This is called after the
 * parent of DEAD_TASK has collected the exit status of the task via
 * wait().
 */
#define release_thread(dead_task)

/* Prepare to copy thread state - unlazy all lazy status */
#define prepare_to_copy(tsk)	do { } while (0)

/*
 * This is the mechanism for creating a new kernel thread.
 *
 * NOTE 1: Only a kernel-only process (ie the swapper or direct
 * descendants who haven't done an "execve()") should use this: it
 * will work within a system call from a "real" process, but the
 * process memory space will not be free'd until both the parent and
 * the child have exited.
 *
 * NOTE 2: This MUST NOT be an inlined function.  Otherwise, we get
 * into trouble in init/main.c when the child thread returns to
 * do_basic_setup() and the timing is such that free_initmem() has
 * been called already.
 */
extern pid_t kernel_thread (int (*fn)(void *), void *arg, unsigned long flags);
/* Get wait channel for task P.  */
extern unsigned long get_wchan (struct task_struct *p);

/* Return instruction pointer of blocked task TSK.  */
#define KSTK_EIP(tsk)					\
({							\
	struct pt_regs *_regs = task_pt_regs(tsk);	\
	_regs->cr_iip + ia64_psr(_regs)->ri;		\
})

/* Return stack pointer of blocked task TSK.  */
#define KSTK_ESP(tsk)	((tsk)->thread.ksp)
extern void ia64_getreg_unknown_kr (void);
extern void ia64_setreg_unknown_kr (void);

#define ia64_get_kr(regnum)					\
({								\
	unsigned long r = 0;					\
								\
	switch (regnum) {					\
	    case 0: r = ia64_getreg(_IA64_REG_AR_KR0); break;	\
	    case 1: r = ia64_getreg(_IA64_REG_AR_KR1); break;	\
	    case 2: r = ia64_getreg(_IA64_REG_AR_KR2); break;	\
	    case 3: r = ia64_getreg(_IA64_REG_AR_KR3); break;	\
	    case 4: r = ia64_getreg(_IA64_REG_AR_KR4); break;	\
	    case 5: r = ia64_getreg(_IA64_REG_AR_KR5); break;	\
	    case 6: r = ia64_getreg(_IA64_REG_AR_KR6); break;	\
	    case 7: r = ia64_getreg(_IA64_REG_AR_KR7); break;	\
	    default: ia64_getreg_unknown_kr(); break;		\
	}							\
	r;							\
})

#define ia64_set_kr(regnum, r)					\
({								\
	switch (regnum) {					\
	    case 0: ia64_setreg(_IA64_REG_AR_KR0, r); break;	\
	    case 1: ia64_setreg(_IA64_REG_AR_KR1, r); break;	\
	    case 2: ia64_setreg(_IA64_REG_AR_KR2, r); break;	\
	    case 3: ia64_setreg(_IA64_REG_AR_KR3, r); break;	\
	    case 4: ia64_setreg(_IA64_REG_AR_KR4, r); break;	\
	    case 5: ia64_setreg(_IA64_REG_AR_KR5, r); break;	\
	    case 6: ia64_setreg(_IA64_REG_AR_KR6, r); break;	\
	    case 7: ia64_setreg(_IA64_REG_AR_KR7, r); break;	\
	    default: ia64_setreg_unknown_kr(); break;		\
	}							\
})
/*
 * The following three macros can't be inline functions because we don't have struct
 * task_struct at this point.
 */

/*
 * Return TRUE if task T owns the fph partition of the CPU we're running on.
 * Must be called from code that has preemption disabled.
 */
#define ia64_is_local_fpu_owner(t)								\
({												\
	struct task_struct *__ia64_islfo_task = (t);						\
	(__ia64_islfo_task->thread.last_fph_cpu == smp_processor_id()				\
	 && __ia64_islfo_task == (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER));	\
})

/*
 * Mark task T as owning the fph partition of the CPU we're running on.
 * Must be called from code that has preemption disabled.
 */
#define ia64_set_local_fpu_owner(t) do {					\
	struct task_struct *__ia64_slfo_task = (t);				\
	__ia64_slfo_task->thread.last_fph_cpu = smp_processor_id();		\
	ia64_set_kr(IA64_KR_FPU_OWNER, (unsigned long) __ia64_slfo_task);	\
} while (0)

/* Mark the fph partition of task T as being invalid on all CPUs.  */
#define ia64_drop_fpu(t)	((t)->thread.last_fph_cpu = -1)
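/*
 * Sketch of the lazy-fph protocol these three helpers implement (the real
 * callers live in the context-switch and fault paths, not in this file;
 * prev/next name the outgoing and incoming tasks):
 *
 *	// giving up f32-f127 on the current CPU:
 *	if (ia64_is_local_fpu_owner(prev))
 *		ia64_save_fpu(prev->thread.fph);	// CPU and task both match
 *
 *	// reacquiring them, e.g. after a disabled-fp-register fault:
 *	if (!ia64_is_local_fpu_owner(next)) {
 *		ia64_load_fpu(next->thread.fph);
 *		ia64_set_local_fpu_owner(next);
 *	}
 *
 * ia64_drop_fpu() invalidates the cached copy everywhere, forcing a reload.
 */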
extern void __ia64_init_fpu (void);
extern void __ia64_save_fpu (struct ia64_fpreg *fph);
extern void __ia64_load_fpu (struct ia64_fpreg *fph);
extern void ia64_save_debug_regs (unsigned long *save_area);
extern void ia64_load_debug_regs (unsigned long *save_area);

#ifdef CONFIG_IA32_SUPPORT
extern void ia32_save_state (struct task_struct *task);
extern void ia32_load_state (struct task_struct *task);
#endif

#define ia64_fph_enable()	do { ia64_rsm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
#define ia64_fph_disable()	do { ia64_ssm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
/* load fp 0.0 into fph */
static inline void
ia64_init_fpu (void) {
	ia64_fph_enable();
	__ia64_init_fpu();
	ia64_fph_disable();
}

/* save f32-f127 at FPH */
static inline void
ia64_save_fpu (struct ia64_fpreg *fph) {
	ia64_fph_enable();
	__ia64_save_fpu(fph);
	ia64_fph_disable();
}

/* load f32-f127 from FPH */
static inline void
ia64_load_fpu (struct ia64_fpreg *fph) {
	ia64_fph_enable();
	__ia64_load_fpu(fph);
	ia64_fph_disable();
}
static inline __u64
ia64_clear_ic (void)
{
	__u64 psr;
	psr = ia64_getreg(_IA64_REG_PSR);
	ia64_stop();
	ia64_rsm(IA64_PSR_I | IA64_PSR_IC);
	ia64_srlz_i();
	return psr;
}

/*
 * Restore the psr.
 */
static inline void
ia64_set_psr (__u64 psr)
{
	ia64_stop();
	ia64_setreg(_IA64_REG_PSR_L, psr);
	ia64_srlz_d();
}
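/*
 * ia64_clear_ic() and ia64_set_psr() are used as a pair around short
 * critical sections that must run with interrupts and interruption
 * collection disabled (e.g. while inserting translations; sketch only):
 *
 *	psr = ia64_clear_ic();
 *	...update TLB/translation-register state...
 *	ia64_set_psr(psr);
 *	ia64_srlz_i();
 */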
/*
 * Insert a translation into an instruction and/or data translation
 * register.
 */
static inline void
ia64_itr (__u64 target_mask, __u64 tr_num,
	  __u64 vmaddr, __u64 pte,
	  __u64 log_page_size)
{
	ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
	ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
	ia64_stop();
	if (target_mask & 0x1)
		ia64_itri(tr_num, pte);
	if (target_mask & 0x2)
		ia64_itrd(tr_num, pte);
}
/*
 * Insert a translation into the instruction and/or data translation
 * cache.
 */
static inline void
ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte,
	  __u64 log_page_size)
{
	ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
	ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
	ia64_stop();
	/* as per EAS2.6, itc must be the last instruction in an instruction group */
	if (target_mask & 0x1)
		ia64_itci(pte);
	if (target_mask & 0x2)
		ia64_itcd(pte);
}
/*
 * Purge a range of addresses from instruction and/or data translation
 * register(s).
 */
static inline void
ia64_ptr (__u64 target_mask, __u64 vmaddr, __u64 log_size)
{
	if (target_mask & 0x1)
		ia64_ptri(vmaddr, (log_size << 2));
	if (target_mask & 0x2)
		ia64_ptrd(vmaddr, (log_size << 2));
}
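/*
 * Illustrative use of the translation helpers above (a sketch, under the
 * psr.ic-off discipline shown earlier): pin an 8KB page (log size 13) into
 * data TR slot 2, then later purge it.  Mask 0x1 selects the instruction
 * side, 0x2 the data side, 0x3 both:
 *
 *	psr = ia64_clear_ic();
 *	ia64_itr(0x2, 2, vmaddr, pte, 13);
 *	ia64_set_psr(psr);
 *	ia64_srlz_i();
 *	...
 *	ia64_ptr(0x2, vmaddr, 13);
 *	ia64_srlz_i();
 */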
/* Set the interrupt vector address.  The address must be suitably aligned (32KB).  */
static inline void
ia64_set_iva (void *ivt_addr)
{
	ia64_setreg(_IA64_REG_CR_IVA, (__u64) ivt_addr);
	ia64_srlz_i();
}

/* Set the page table address and control bits.  */
static inline void
ia64_set_pta (__u64 pta)
{
	/* Note: srlz.i implies srlz.d */
	ia64_setreg(_IA64_REG_CR_PTA, pta);
	ia64_srlz_i();
}

static inline void
ia64_eoi (void)
{
	ia64_setreg(_IA64_REG_CR_EOI, 0);
	ia64_srlz_d();
}
#define cpu_relax()	ia64_hint(ia64_hint_pause)

static inline void
ia64_set_lrr0 (unsigned long val)
{
	ia64_setreg(_IA64_REG_CR_LRR0, val);
	ia64_srlz_d();
}

static inline void
ia64_set_lrr1 (unsigned long val)
{
	ia64_setreg(_IA64_REG_CR_LRR1, val);
	ia64_srlz_d();
}
/*
 * Given the address to which a spill occurred, return the unat bit
 * number that corresponds to this address.
 */
static inline __u64
ia64_unat_pos (void *spill_addr)
{
	return ((__u64) spill_addr >> 3) & 0x3f;
}

/*
 * Set the NaT bit of an integer register which was spilled at address
 * SPILL_ADDR.  UNAT is the mask to be updated.
 */
static inline void
ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat)
{
	__u64 bit = ia64_unat_pos(spill_addr);
	__u64 mask = 1UL << bit;

	*unat = (*unat & ~mask) | (nat << bit);
}
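/*
 * Worked example: a register spilled to address 0xe0000000000a0018 sits at
 * double-word offset 3 within its 512-byte window, so ia64_unat_pos()
 * returns (0x18 >> 3) & 0x3f == 3, and ia64_set_unat() then replaces bit 3
 * of *unat with the register's NaT value.
 */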
/*
 * Return saved PC of a blocked thread.
 * Note that the only way T can block is through a call to schedule() -> switch_to().
 */
static inline unsigned long
thread_saved_pc (struct task_struct *t)
{
	struct unw_frame_info info;
	unsigned long ip;

	unw_init_from_blocked_task(&info, t);
	if (unw_unwind(&info) < 0)
		return 0;
	unw_get_ip(&info, &ip);
	return ip;
}

/*
 * Get the current instruction/program counter value.
 */
#define current_text_addr() \
	({ void *_pc; _pc = (void *)ia64_getreg(_IA64_REG_IP); _pc; })
static inline __u64
ia64_get_ivr (void)
{
	__u64 r;
	ia64_srlz_d();
	r = ia64_getreg(_IA64_REG_CR_IVR);
	ia64_srlz_d();
	return r;
}

static inline void
ia64_set_dbr (__u64 regnum, __u64 value)
{
	__ia64_set_dbr(regnum, value);
#ifdef CONFIG_ITANIUM
	ia64_srlz_d();
#endif
}

static inline __u64
ia64_get_dbr (__u64 regnum)
{
	__u64 retval;

	retval = __ia64_get_dbr(regnum);
#ifdef CONFIG_ITANIUM
	ia64_srlz_d();
#endif
	return retval;
}
static inline __u64
ia64_rotr (__u64 w, __u64 n)
{
	return (w >> n) | (w << (64 - n));
}

#define ia64_rotl(w,n)	ia64_rotr((w), (64) - (n))
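/*
 * Example: ia64_rotr(0x1UL, 4) == 0x1000000000000000 (bit 0 rotates into
 * bit 60), and ia64_rotl(w, n) rotates left by rotating right by the
 * complement.  Note n must be in 1..63: a rotate count of 0 would shift by
 * 64, which is undefined in C.
 */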
/*
 * Take a mapped kernel address and return the equivalent address
 * in the region 7 identity mapped virtual area.
 */
static inline void *
ia64_imva (void *addr)
{
	void *result;
	result = (void *) ia64_tpa(addr);
	return __va(result);
}
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
#define PREFETCH_STRIDE		L1_CACHE_BYTES

static inline void
prefetch (const void *x)
{
	ia64_lfetch(ia64_lfhint_none, x);
}

static inline void
prefetchw (const void *x)
{
	ia64_lfetch_excl(ia64_lfhint_none, x);
}

#define spin_lock_prefetch(x)	prefetchw(x)
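/*
 * Typical (illustrative) use: overlap the fetch of the next node with work
 * on the current one.  lfetch is only a hint, so prefetching a pointer that
 * is about to become invalid is safe:
 *
 *	for (p = head; p; p = p->next) {
 *		prefetch(p->next);
 *		process(p);
 *	}
 */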
extern unsigned long boot_option_idle_override;

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_IA64_PROCESSOR_H */