/*
 * include/asm-alpha/processor.h
 *
 * Copyright (C) 1994 Linus Torvalds
 */

#ifndef __ASM_ALPHA_PROCESSOR_H
#define __ASM_ALPHA_PROCESSOR_H

#include <linux/personality.h>	/* for ADDR_LIMIT_32BIT */

/*
 * Returns current instruction pointer ("program counter").
 */
#define current_text_addr() \
  ({ void *__pc; __asm__ ("br %0,.+4" : "=r"(__pc)); __pc; })
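
/*
 * Illustrative usage (added commentary, not part of the original header):
 * the "br %0,.+4" branch writes the address of the *next* instruction
 * into a register, which is how the current PC is recovered.  A
 * hypothetical debugging call site might look like:
 *
 *	void *pc = current_text_addr();
 *	printk("executing near %p\n", pc);
 */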

/*
 * We have a 42-bit user address space: 4TB user VM...
 */
#define TASK_SIZE (0x40000000000UL)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.  */
#define TASK_UNMAPPED_BASE \
  ((current->personality & ADDR_LIMIT_32BIT) ? 0x40000000 : TASK_SIZE / 2)
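
/*
 * Illustrative arithmetic (added commentary, not in the original header):
 *
 *	TASK_SIZE     = 0x40000000000 = 2^42 bytes = 4TB
 *	TASK_SIZE / 2 = 0x20000000000 (2TB) -- default mmap search base
 *	0x40000000    = 1GB                 -- mmap base for tasks with
 *	                                       ADDR_LIMIT_32BIT set, keeping
 *	                                       mappings 32-bit addressable
 */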

/*
 * Bus types
 */
#define EISA_bus 1
#define MCA_bus 0
#define MCA_bus__is_a_macro /* for versions in ksyms.c */

typedef struct {
	unsigned long seg;
} mm_segment_t;

/* This is dead.  Everything has been moved to thread_info.  */
struct thread_struct { };
#define INIT_THREAD  { }

/* Return saved PC of a blocked thread.  */
struct task_struct;
extern unsigned long thread_saved_pc(struct task_struct *);

/* Do necessary setup to start up a newly executed thread.  */
extern void start_thread(struct pt_regs *, unsigned long, unsigned long);

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy status */
#define prepare_to_copy(tsk)	do { } while (0)

/* Create a kernel thread without removing it from tasklists.  */
extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
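
/*
 * Usage sketch (illustrative; "my_thread_fn" is a hypothetical callback,
 * and these clone flags are merely a common choice, not mandated here):
 *
 *	static int my_thread_fn(void *data) { ... }
 *
 *	kernel_thread(my_thread_fn, NULL, CLONE_FS | CLONE_FILES | SIGCHLD);
 */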

unsigned long get_wchan(struct task_struct *p);

/* See arch/alpha/kernel/ptrace.c for details.  */
#define PT_REG(reg) \
  (PAGE_SIZE*2 - sizeof(struct pt_regs) + offsetof(struct pt_regs, reg))

#define SW_REG(reg) \
 (PAGE_SIZE*2 - sizeof(struct pt_regs) - sizeof(struct switch_stack) \
  + offsetof(struct switch_stack, reg))

#define KSTK_EIP(tsk) \
  (*(unsigned long *)(PT_REG(pc) + (unsigned long) ((tsk)->thread_info)))

#define KSTK_ESP(tsk) \
  ((tsk) == current ? rdusp() : (tsk)->thread_info->pcb.usp)
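
/*
 * Added commentary (deduced from the macros above, not from the original
 * file): the kernel stack occupies two pages starting at thread_info,
 * with struct pt_regs stored at the very top and struct switch_stack
 * pushed just below it on a context switch.  PT_REG()/SW_REG() turn a
 * register name into its byte offset from the thread_info base, so e.g.
 * KSTK_EIP() reads a blocked task's saved pc straight off its stack:
 *
 *	thread_info base
 *	  |  ...stack grows down...  |  <- PAGE_SIZE*2 bytes total
 *	  |  struct switch_stack     |  <- SW_REG() offsets
 *	  |  struct pt_regs          |  <- PT_REG() offsets (top of stack)
 */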

#define cpu_relax()	barrier()
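
/*
 * Illustrative spin-wait loop (not from the original file): cpu_relax()
 * is the canonical body of a busy-wait.  On Alpha it is only a compiler
 * barrier, which forces the condition to be re-read on each iteration:
 *
 *	while (!condition)
 *		cpu_relax();
 */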

#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1)
extern inline void prefetch(const void *ptr)
{
	__builtin_prefetch(ptr, 0, 3);
}

extern inline void prefetchw(const void *ptr)
{
	__builtin_prefetch(ptr, 1, 3);
}

extern inline void spin_lock_prefetch(const void *ptr)
{
	__builtin_prefetch(ptr, 1, 3);
}

#else
/* Older GCC without __builtin_prefetch: loads into the zero register
   $31 have no architectural effect, so they serve as prefetch hints.  */
extern inline void prefetch(const void *ptr)
{
	__asm__ ("ldl $31,%0" : : "m"(*(char *)ptr));
}

extern inline void prefetchw(const void *ptr)
{
	__asm__ ("ldq $31,%0" : : "m"(*(char *)ptr));
}

extern inline void spin_lock_prefetch(const void *ptr)
{
	__asm__ ("ldq $31,%0" : : "m"(*(char *)ptr));
}

#endif /* GCC 3.1 */
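
/*
 * Usage sketch (illustrative, not from the original file): prefetching
 * the next node while the current one is processed can hide memory
 * latency when walking a linked list:
 *
 *	for (p = head; p != NULL; p = p->next) {
 *		prefetch(p->next);
 *		do_something(p);
 *	}
 *
 * prefetchw() is the variant for lines about to be written, and
 * spin_lock_prefetch() hints that a lock cache line will soon be taken.
 */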

#endif /* __ASM_ALPHA_PROCESSOR_H */