include/asm-mips/processor.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 Waldorf GMBH
 * Copyright (C) 1995, 1996, 1997, 1998, 1999, 2001, 2002, 2003 Ralf Baechle
 * Copyright (C) 1996 Paul M. Antoine
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_PROCESSOR_H
#define _ASM_PROCESSOR_H
#include <linux/cpumask.h>
#include <linux/threads.h>

#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/cpu-info.h>
#include <asm/mipsregs.h>
#include <asm/prefetch.h>
#include <asm/system.h>
/*
 * Return current instruction pointer ("program counter").
 */
#define current_text_addr() ({ __label__ _l; _l: &&_l;})
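/*
 * How this works: "__label__ _l" declares a block-local label, "_l:"
 * places it at this point in the code, and "&&_l" uses GCC's
 * label-as-value extension to take its address, so the statement
 * expression evaluates to (roughly) the address of the current
 * instruction.
 */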
/*
 * System setup and hardware flags..
 */
extern void (*cpu_wait)(void);

extern unsigned int vced_count, vcei_count;

#ifdef CONFIG_32BIT
/*
 * User space process size: 2GB. This is hardcoded into a few places,
 * so don't change it unless you know what you are doing.
 */
#define TASK_SIZE	0x7fff8000UL

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))
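/*
 * Worked example (illustration only): with TASK_SIZE = 0x7fff8000,
 * TASK_SIZE / 3 = 0x2aaa8000 exactly, which is already a multiple of a
 * 4 KiB page, so the mmap() search starts about one third of the way
 * into the 2GB user address space.
 */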
#endif

#ifdef CONFIG_64BIT
/*
 * User space process size: 1TB. This is hardcoded into a few places,
 * so don't change it unless you know what you are doing. TASK_SIZE
 * is limited to 1TB by the R4000 architecture; R10000 and better can
 * support 16TB; the architectural reserve for future expansion is
 * 8192EB ...
 */
#define TASK_SIZE32	0x7fff8000UL
#define TASK_SIZE	0x10000000000UL

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE \
	(test_thread_flag(TIF_32BIT_ADDR) ? \
		PAGE_ALIGN(TASK_SIZE32 / 3) : PAGE_ALIGN(TASK_SIZE / 3))
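/*
 * On a 64-bit kernel the base is chosen per thread: a task running a
 * 32-bit ABI (TIF_32BIT_ADDR set) gets the same base it would have on
 * a 32-bit kernel, computed from TASK_SIZE32, while a native 64-bit
 * task starts its mmap() search around 1TB / 3 (about 0x5555556000
 * with 4 KiB pages -- value shown for illustration only).
 */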
#endif

#define NUM_FPU_REGS	32

typedef __u64 fpureg_t;
/*
 * It would be nice to add some more fields for emulator statistics, but there
 * are a number of fixed offsets in offset.h and elsewhere that would have to
 * be recalculated by hand. So the additional information will be private to
 * the FPU emulator for now. See asm-mips/fpu_emulator.h.
 */

struct mips_fpu_struct {
	fpureg_t	fpr[NUM_FPU_REGS];
	unsigned int	fcr31;
};
#define NUM_DSP_REGS	6

typedef __u32 dspreg_t;

struct mips_dsp_state {
	dspreg_t	dspr[NUM_DSP_REGS];
	unsigned int	dspcontrol;
};
#define INIT_CPUMASK { \
	{0,} \
}

typedef struct {
	unsigned long seg;
} mm_segment_t;

#define ARCH_MIN_TASKALIGN	8

struct mips_abi;
/*
 * If you change thread_struct remember to change the #defines below too!
 */
struct thread_struct {
	/* Saved main processor registers. */
	unsigned long reg16;
	unsigned long reg17, reg18, reg19, reg20, reg21, reg22, reg23;
	unsigned long reg29, reg30, reg31;

	/* Saved cp0 stuff. */
	unsigned long cp0_status;

	/* Saved fpu/fpu emulator stuff. */
	struct mips_fpu_struct fpu;
#ifdef CONFIG_MIPS_MT_FPAFF
	/* Emulated instruction count */
	unsigned long emulated_fp;
	/* Saved per-thread scheduler affinity mask */
	cpumask_t user_cpus_allowed;
#endif /* CONFIG_MIPS_MT_FPAFF */

	/* Saved state of the DSP ASE, if available. */
	struct mips_dsp_state dsp;

	/* Other stuff associated with the thread. */
	unsigned long cp0_badvaddr;	/* Last user fault */
	unsigned long cp0_baduaddr;	/* Last kernel fault accessing USEG */
	unsigned long error_code;
	unsigned long trap_no;
	unsigned long irix_trampoline;	/* Wheee... */
	unsigned long irix_oldctx;
	struct mips_abi *abi;
};
#ifdef CONFIG_MIPS_MT_FPAFF
#define FPAFF_INIT \
	.emulated_fp = 0, \
	.user_cpus_allowed = INIT_CPUMASK,
#else
#define FPAFF_INIT
#endif /* CONFIG_MIPS_MT_FPAFF */
#define INIT_THREAD { \
	/* \
	 * Saved main processor registers \
	 */ \
	.reg16 = 0, \
	.reg17 = 0, \
	.reg18 = 0, \
	.reg19 = 0, \
	.reg20 = 0, \
	.reg21 = 0, \
	.reg22 = 0, \
	.reg23 = 0, \
	.reg29 = 0, \
	.reg30 = 0, \
	.reg31 = 0, \
	/* \
	 * Saved cp0 stuff \
	 */ \
	.cp0_status = 0, \
	/* \
	 * Saved FPU/FPU emulator stuff \
	 */ \
	.fpu = { \
		.fpr = {0,}, \
		.fcr31 = 0, \
	}, \
	/* \
	 * FPU affinity state (null if not FPAFF) \
	 */ \
	FPAFF_INIT \
	/* \
	 * Saved DSP stuff \
	 */ \
	.dsp = { \
		.dspr = {0, }, \
		.dspcontrol = 0, \
	}, \
	/* \
	 * Other stuff associated with the process \
	 */ \
	.cp0_badvaddr = 0, \
	.cp0_baduaddr = 0, \
	.error_code = 0, \
	.trap_no = 0, \
	.irix_trampoline = 0, \
	.irix_oldctx = 0, \
}
struct task_struct;

/* Free all resources held by a thread. */
#define release_thread(thread) do { } while(0)

/* Prepare to copy thread state - unlazy all lazy status */
#define prepare_to_copy(tsk) do { } while (0)

extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

extern unsigned long thread_saved_pc(struct task_struct *tsk);

/*
 * Do necessary setup to start up a newly executed thread.
 */
extern void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp);

unsigned long get_wchan(struct task_struct *p);
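/*
 * The user-mode register state of a task is saved in a struct pt_regs
 * at the top of its kernel stack, with 32 bytes left free above it.
 * __KSTK_TOS() computes that stack-top address, task_pt_regs() steps
 * back over one pt_regs to reach the saved state, and the KSTK_*
 * macros read the saved EPC, user stack pointer ($29) and CP0 status
 * from it.
 */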
#define __KSTK_TOS(tsk) ((unsigned long)task_stack_page(tsk) + THREAD_SIZE - 32)
#define task_pt_regs(tsk) ((struct pt_regs *)__KSTK_TOS(tsk) - 1)
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->cp0_epc)
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[29])
#define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status)

#define cpu_relax()	barrier()
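/*
 * barrier() is only a compiler barrier, so cpu_relax() on MIPS emits no
 * pause-style instruction; it merely stops the compiler from caching a
 * spin-wait condition in a register across loop iterations.
 */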
/*
 * Return_address is a replacement for __builtin_return_address(count)
 * which on certain architectures cannot reasonably be implemented in GCC
 * (MIPS, Alpha) or is unusable with -fomit-frame-pointer (i386).
 * Note that __builtin_return_address(x>=1) is forbidden because GCC
 * aborts compilation on some CPUs. It's simply not possible to unwind
 * some CPUs' stackframes.
 *
 * __builtin_return_address works only for non-leaf functions. We avoid the
 * overhead of a function call by forcing the compiler to save the return
 * address register on the stack.
 */
#define return_address() ({__asm__ __volatile__("":::"$31");__builtin_return_address(0);})
#ifdef CONFIG_CPU_HAS_PREFETCH

#define ARCH_HAS_PREFETCH
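/*
 * Defining ARCH_HAS_PREFETCH tells <linux/prefetch.h> to use the
 * architecture-specific prefetch() below instead of its generic
 * __builtin_prefetch() fallback. The inline asm issues a MIPS IV
 * "pref" with the Pref_Load hint from <asm/prefetch.h>, bracketed by
 * ".set mips4"/".set mips0" so the assembler accepts the instruction
 * even when the file is otherwise built for an older ISA.
 */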
static inline void prefetch(const void *addr)
{
	__asm__ __volatile__(
	"	.set	mips4		\n"
	"	pref	%0, (%1)	\n"
	"	.set	mips0		\n"
	:
	: "i" (Pref_Load), "r" (addr));
}

#endif

#endif /* _ASM_PROCESSOR_H */