/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 Waldorf GMBH
 * Copyright (C) 1995, 1996, 1997, 1998, 1999, 2001, 2002, 2003 Ralf Baechle
 * Copyright (C) 1996 Paul M. Antoine
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_PROCESSOR_H
#define _ASM_PROCESSOR_H

#include <linux/cpumask.h>
#include <linux/threads.h>

#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/cpu-info.h>
#include <asm/mipsregs.h>
#include <asm/prefetch.h>

/*
 * Return current instruction pointer ("program counter").
 */
#define current_text_addr() ({ __label__ _l; _l: &&_l;})
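/*
 * Illustrative use (hypothetical caller, not part of this header). The
 * statement expression declares a GCC local label, places it at the point
 * of expansion and yields its address via the &&label extension, i.e.
 * roughly the address of the instruction being executed:
 *
 *        void *pc = current_text_addr();
 *        pr_info("executing near %p\n", pc);
 */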
/*
 * System setup and hardware flags.
 */
extern unsigned int vced_count, vcei_count;

/*
 * MIPS does have an arch_pick_mmap_layout()
 */
#define HAVE_ARCH_PICK_MMAP_LAYOUT 1

/*
 * A special page (the vdso) is mapped into all processes at the very
 * top of the virtual memory space.
 */
#define SPECIAL_PAGES_SIZE PAGE_SIZE

#ifdef CONFIG_32BIT
#ifdef CONFIG_KVM_GUEST
/* User space process size is limited to 1GB in KVM Guest Mode */
#define TASK_SIZE	0x3fff8000UL
#else
/*
 * User space process size: 2GB. This is hardcoded into a few places,
 * so don't change it unless you know what you are doing.
 */
#define TASK_SIZE	0x7fff8000UL
#endif

#ifdef __KERNEL__
#define STACK_TOP_MAX	TASK_SIZE
#endif

#define TASK_IS_32BIT_ADDR 1

#endif

#ifdef CONFIG_64BIT
/*
 * User space process size: 1TB. This is hardcoded into a few places,
 * so don't change it unless you know what you are doing. TASK_SIZE
 * is limited to 1TB by the R4000 architecture; R10000 and better can
 * support 16TB; the architectural reserve for future expansion is
 * 8192EB ...
 */
#define TASK_SIZE32	0x7fff8000UL
#define TASK_SIZE64	0x10000000000UL
#define TASK_SIZE (test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)

#ifdef __KERNEL__
#define STACK_TOP_MAX	TASK_SIZE64
#endif

#define TASK_SIZE_OF(tsk)						\
	(test_tsk_thread_flag(tsk, TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)

#define TASK_IS_32BIT_ADDR test_thread_flag(TIF_32BIT_ADDR)

#endif

#define STACK_TOP	((TASK_SIZE & PAGE_MASK) - SPECIAL_PAGES_SIZE)

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmaps.
 */
#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)
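/*
 * Worked example of the resulting layout, assuming a 32-bit kernel with
 * 4 KiB pages (PAGE_MASK == ~0xfffUL) and no KVM guest mode:
 *
 *        TASK_SIZE          = 0x7fff8000
 *        STACK_TOP          = (0x7fff8000 & ~0xfff) - 0x1000 = 0x7fff7000
 *        TASK_UNMAPPED_BASE = PAGE_ALIGN(0x7fff8000 / 3)     = 0x2aaa8000
 *
 * i.e. the vdso page sits just below TASK_SIZE, the user stack grows down
 * from beneath it, and (in the legacy bottom-up layout) mmap() searches
 * upward from roughly one third of the address space.
 */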
#define NUM_FPU_REGS	32

#ifdef CONFIG_CPU_HAS_MSA
# define FPU_REG_WIDTH	128
#else
# define FPU_REG_WIDTH	64
#endif

union fpureg {
	__u32	val32[FPU_REG_WIDTH / 32];
	__u64	val64[FPU_REG_WIDTH / 64];
};

#ifdef CONFIG_CPU_LITTLE_ENDIAN
# define FPR_IDX(width, idx)	(idx)
#else
# define FPR_IDX(width, idx)	((FPU_REG_WIDTH / (width)) - 1 - (idx))
#endif

#define BUILD_FPR_ACCESS(width) \
static inline u##width get_fpr##width(union fpureg *fpr, unsigned idx)	\
{									\
	return fpr->val##width[FPR_IDX(width, idx)];			\
}									\
									\
static inline void set_fpr##width(union fpureg *fpr, unsigned idx,	\
				  u##width val)				\
{									\
	fpr->val##width[FPR_IDX(width, idx)] = val;			\
}

BUILD_FPR_ACCESS(32)
BUILD_FPR_ACCESS(64)
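/*
 * Illustrative use of the generated accessors (hypothetical snippet, not
 * part of this header). With CONFIG_CPU_HAS_MSA (FPU_REG_WIDTH == 128) on a
 * big-endian kernel, FPR_IDX(64, 0) == 128/64 - 1 - 0 == 1, so the "first"
 * 64-bit half of a register lives in val64[1]; on little-endian it stays in
 * val64[0]. The accessors hide that difference:
 *
 *        union fpureg *fpr = &current->thread.fpu.fpr[0];
 *        u64 lo = get_fpr64(fpr, 0);        // low 64 bits of $f0
 *        set_fpr64(fpr, 0, lo | 1);
 */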
/*
 * It would be nice to add some more fields for emulator statistics,
 * but the additional information is private to the FPU emulator for now.
 * See arch/mips/include/asm/fpu_emulator.h.
 */

struct mips_fpu_struct {
	union fpureg	fpr[NUM_FPU_REGS];
	unsigned int	fcr31;
	unsigned int	msacsr;
};

#define NUM_DSP_REGS	6

typedef __u32 dspreg_t;

struct mips_dsp_state {
	dspreg_t	dspr[NUM_DSP_REGS];
	unsigned int	dspcontrol;
};

#define INIT_CPUMASK { \
	{0,} \
}
struct mips3264_watch_reg_state {
	/* The width of watchlo is 32 in a 32 bit kernel and 64 in a
	   64 bit kernel.  We use unsigned long as it has the same
	   property. */
	unsigned long watchlo[NUM_WATCH_REGS];
	/* Only the mask and IRW bits from watchhi. */
	u16 watchhi[NUM_WATCH_REGS];
};

union mips_watch_reg_state {
	struct mips3264_watch_reg_state mips3264;
};
#if defined(CONFIG_CPU_CAVIUM_OCTEON)

struct octeon_cop2_state {
	/* DMFC2 rt, 0x0201 */
	unsigned long	cop2_crc_iv;
	/* DMFC2 rt, 0x0202 (Set with DMTC2 rt, 0x1202) */
	unsigned long	cop2_crc_length;
	/* DMFC2 rt, 0x0200 (set with DMTC2 rt, 0x4200) */
	unsigned long	cop2_crc_poly;
	/* DMFC2 rt, 0x0402; DMFC2 rt, 0x040A */
	unsigned long	cop2_llm_dat[2];
	/* DMFC2 rt, 0x0084 */
	unsigned long	cop2_3des_iv;
	/* DMFC2 rt, 0x0080; DMFC2 rt, 0x0081; DMFC2 rt, 0x0082 */
	unsigned long	cop2_3des_key[3];
	/* DMFC2 rt, 0x0088 (Set with DMTC2 rt, 0x0098) */
	unsigned long	cop2_3des_result;
	/* DMFC2 rt, 0x0111 (FIXME: Read Pass1 Errata) */
	unsigned long	cop2_aes_inp0;
	/* DMFC2 rt, 0x0102; DMFC2 rt, 0x0103 */
	unsigned long	cop2_aes_iv[2];
	/* DMFC2 rt, 0x0104; DMFC2 rt, 0x0105; DMFC2 rt, 0x0106; DMFC2
	 * rt, 0x0107 */
	unsigned long	cop2_aes_key[4];
	/* DMFC2 rt, 0x0110 */
	unsigned long	cop2_aes_keylen;
	/* DMFC2 rt, 0x0100; DMFC2 rt, 0x0101 */
	unsigned long	cop2_aes_result[2];
	/* DMFC2 rt, 0x0240; DMFC2 rt, 0x0241; DMFC2 rt, 0x0242; DMFC2
	 * rt, 0x0243; DMFC2 rt, 0x0244; DMFC2 rt, 0x0245; DMFC2 rt,
	 * 0x0246; DMFC2 rt, 0x0247; DMFC2 rt, 0x0248; DMFC2 rt,
	 * 0x0249; DMFC2 rt, 0x024A; DMFC2 rt, 0x024B; DMFC2 rt,
	 * 0x024C; DMFC2 rt, 0x024D; DMFC2 rt, 0x024E - Pass2 */
	unsigned long	cop2_hsh_datw[15];
	/* DMFC2 rt, 0x0250; DMFC2 rt, 0x0251; DMFC2 rt, 0x0252; DMFC2
	 * rt, 0x0253; DMFC2 rt, 0x0254; DMFC2 rt, 0x0255; DMFC2 rt,
	 * 0x0256; DMFC2 rt, 0x0257 - Pass2 */
	unsigned long	cop2_hsh_ivw[8];
	/* DMFC2 rt, 0x0258; DMFC2 rt, 0x0259 - Pass2 */
	unsigned long	cop2_gfm_mult[2];
	/* DMFC2 rt, 0x025E - Pass2 */
	unsigned long	cop2_gfm_poly;
	/* DMFC2 rt, 0x025A; DMFC2 rt, 0x025B - Pass2 */
	unsigned long	cop2_gfm_result[2];
};
#define COP2_INIT						\
	.cp2			= {0,},

struct octeon_cvmseg_state {
	unsigned long	cvmseg[CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE]
		[cpu_dcache_line_size() / sizeof(unsigned long)];
};

#elif defined(CONFIG_CPU_XLP)
struct nlm_cop2_state {
	u64	rx[4];
	u64	tx[4];
	u32	tx_msg_status;
	u32	rx_msg_status;
};

#define COP2_INIT						\
	.cp2			= {{0}, {0}, 0, 0},
#else
#define COP2_INIT
#endif
typedef struct {
	unsigned long seg;
} mm_segment_t;

#define ARCH_MIN_TASKALIGN	8

struct mips_abi;

/*
 * If you change thread_struct remember to change the #defines below too!
 */
struct thread_struct {
	/* Saved main processor registers. */
	unsigned long reg16;
	unsigned long reg17, reg18, reg19, reg20, reg21, reg22, reg23;
	unsigned long reg29, reg30, reg31;

	/* Saved cp0 stuff. */
	unsigned long cp0_status;

	/* Saved fpu/fpu emulator stuff. */
	struct mips_fpu_struct fpu;
#ifdef CONFIG_MIPS_MT_FPAFF
	/* Emulated instruction count */
	unsigned long emulated_fp;
	/* Saved per-thread scheduler affinity mask */
	cpumask_t user_cpus_allowed;
#endif /* CONFIG_MIPS_MT_FPAFF */

	/* Saved state of the DSP ASE, if available. */
	struct mips_dsp_state dsp;

	/* Saved watch register state, if available. */
	union mips_watch_reg_state watch;

	/* Other stuff associated with the thread. */
	unsigned long cp0_badvaddr;	/* Last user fault */
	unsigned long cp0_baduaddr;	/* Last kernel fault accessing USEG */
	unsigned long error_code;
#ifdef CONFIG_CPU_CAVIUM_OCTEON
	struct octeon_cop2_state cp2 __attribute__ ((__aligned__(128)));
	struct octeon_cvmseg_state cvmseg __attribute__ ((__aligned__(128)));
#endif
#ifdef CONFIG_CPU_XLP
	struct nlm_cop2_state cp2;
#endif
	struct mips_abi *abi;
};

#ifdef CONFIG_MIPS_MT_FPAFF
#define FPAFF_INIT						\
	.emulated_fp		= 0,				\
	.user_cpus_allowed	= INIT_CPUMASK,
#else
#define FPAFF_INIT
#endif /* CONFIG_MIPS_MT_FPAFF */
#define INIT_THREAD  {						\
	/*							\
	 * Saved main processor registers			\
	 */							\
	.reg16			= 0,				\
	.reg17			= 0,				\
	.reg18			= 0,				\
	.reg19			= 0,				\
	.reg20			= 0,				\
	.reg21			= 0,				\
	.reg22			= 0,				\
	.reg23			= 0,				\
	.reg29			= 0,				\
	.reg30			= 0,				\
	.reg31			= 0,				\
	/*							\
	 * Saved cp0 stuff					\
	 */							\
	.cp0_status		= 0,				\
	/*							\
	 * Saved FPU/FPU emulator stuff				\
	 */							\
	.fpu			= {				\
		.fpr		= {{{0,},},},			\
		.fcr31		= 0,				\
		.msacsr		= 0,				\
	},							\
	/*							\
	 * FPU affinity state (null if not FPAFF)		\
	 */							\
	FPAFF_INIT						\
	/*							\
	 * Saved DSP stuff					\
	 */							\
	.dsp			= {				\
		.dspr		= {0, },			\
		.dspcontrol	= 0,				\
	},							\
	/*							\
	 * Saved watch register stuff				\
	 */							\
	.watch = {{{0,},},},					\
	/*							\
	 * Other stuff associated with the process		\
	 */							\
	.cp0_badvaddr		= 0,				\
	.cp0_baduaddr		= 0,				\
	.error_code		= 0,				\
	/*							\
	 * Platform specific cop2 registers (null if no COP2)	\
	 */							\
	COP2_INIT						\
}
struct task_struct;

/* Free all resources held by a thread. */
#define release_thread(thread) do { } while(0)

extern unsigned long thread_saved_pc(struct task_struct *tsk);

/*
 * Do necessary setup to start up a newly executed thread.
 */
extern void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp);

unsigned long get_wchan(struct task_struct *p);

#define __KSTK_TOS(tsk) ((unsigned long)task_stack_page(tsk) +		\
			 THREAD_SIZE - 32 - sizeof(struct pt_regs))
#define task_pt_regs(tsk) ((struct pt_regs *)__KSTK_TOS(tsk))
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->cp0_epc)
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[29])
#define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status)
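/*
 * Illustrative use (hypothetical snippet, not part of this header): the
 * saved user-mode register frame sits just below a 32-byte pad at the top
 * of the kernel stack, so another task's user PC and stack pointer can be
 * read as:
 *
 *        struct pt_regs *regs = task_pt_regs(tsk);
 *        unsigned long pc = KSTK_EIP(tsk);   // == regs->cp0_epc
 *        unsigned long sp = KSTK_ESP(tsk);   // == regs->regs[29], i.e. $sp
 */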
#define cpu_relax()	barrier()

/*
 * Return_address is a replacement for __builtin_return_address(count)
 * which on certain architectures cannot reasonably be implemented in GCC
 * (MIPS, Alpha) or is unusable with -fomit-frame-pointer (i386).
 * Note that __builtin_return_address(x>=1) is forbidden because GCC
 * aborts compilation on some CPUs. It's simply not possible to unwind
 * some CPUs' stackframes.
 *
 * __builtin_return_address works only for non-leaf functions. We avoid the
 * overhead of a function call by forcing the compiler to save the return
 * address register on the stack.
 */
#define return_address() ({__asm__ __volatile__("":::"$31");__builtin_return_address(0);})
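/*
 * Illustrative use (hypothetical caller): the empty asm with "$31" in the
 * clobber list forces the compiler to save the return-address register
 * ($ra) on the stack, so __builtin_return_address(0) still yields the
 * caller's address even in what would otherwise be a leaf function:
 *
 *        void trace_caller(void)
 *        {
 *                pr_debug("called from %p\n", return_address());
 *        }
 */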
#ifdef CONFIG_CPU_HAS_PREFETCH

#define ARCH_HAS_PREFETCH
#define prefetch(x) __builtin_prefetch((x), 0, 1)

#define ARCH_HAS_PREFETCHW
#define prefetchw(x) __builtin_prefetch((x), 1, 1)
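/*
 * Illustrative use (hypothetical loop; buf, n and delta are placeholders):
 * __builtin_prefetch's second argument selects read (0) vs. write (1)
 * intent and the third requests low temporal locality, so a pass that will
 * modify each element might issue:
 *
 *        for (i = 0; i < n; i++) {
 *                prefetchw(&buf[i + 1]);   // hint: buf[i+1] written soon
 *                buf[i] += delta;
 *        }
 */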
/*
 * See Documentation/scheduler/sched-arch.txt; prevents deadlock on SMP
 * systems.
 */
#define __ARCH_WANT_UNLOCKED_CTXSW

#endif

#endif /* _ASM_PROCESSOR_H */