[PATCH] powerpc: merge atomic.h, memory.h
linux-2.6/linux-loongson.git: include/asm-ppc64/processor.h
#ifndef __ASM_PPC64_PROCESSOR_H
#define __ASM_PPC64_PROCESSOR_H

/*
 * Copyright (C) 2001 PPC 64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/stringify.h>
#ifndef __ASSEMBLY__
#include <linux/config.h>
#include <asm/atomic.h>
#include <asm/ppcdebug.h>
#include <asm/a.out.h>
#endif
#include <asm/ptrace.h>
#include <asm/types.h>
#include <asm/systemcfg.h>
#include <asm/cputable.h>

/* Machine State Register (MSR) Fields */
#define MSR_SF_LG 63 /* Enable 64 bit mode */
#define MSR_ISF_LG 61 /* Interrupt 64b mode valid on 630 */
#define MSR_HV_LG 60 /* Hypervisor state */
#define MSR_VEC_LG 25 /* Enable AltiVec */
#define MSR_POW_LG 18 /* Enable Power Management */
#define MSR_WE_LG 18 /* Wait State Enable */
#define MSR_TGPR_LG 17 /* TLB Update registers in use */
#define MSR_CE_LG 17 /* Critical Interrupt Enable */
#define MSR_ILE_LG 16 /* Interrupt Little Endian */
#define MSR_EE_LG 15 /* External Interrupt Enable */
#define MSR_PR_LG 14 /* Problem State / Privilege Level */
#define MSR_FP_LG 13 /* Floating Point enable */
#define MSR_ME_LG 12 /* Machine Check Enable */
#define MSR_FE0_LG 11 /* Floating Exception mode 0 */
#define MSR_SE_LG 10 /* Single Step */
#define MSR_BE_LG 9 /* Branch Trace */
#define MSR_DE_LG 9 /* Debug Exception Enable */
#define MSR_FE1_LG 8 /* Floating Exception mode 1 */
#define MSR_IP_LG 6 /* Exception prefix 0x000/0xFFF */
#define MSR_IR_LG 5 /* Instruction Relocate */
#define MSR_DR_LG 4 /* Data Relocate */
#define MSR_PE_LG 3 /* Protection Enable */
#define MSR_PX_LG 2 /* Protection Exclusive Mode */
#define MSR_PMM_LG 2 /* Performance monitor */
#define MSR_RI_LG 1 /* Recoverable Exception */
#define MSR_LE_LG 0 /* Little Endian */

#ifdef __ASSEMBLY__
#define __MASK(X) (1<<(X))
#else
#define __MASK(X) (1UL<<(X))
#endif

#define MSR_SF __MASK(MSR_SF_LG) /* Enable 64 bit mode */
#define MSR_ISF __MASK(MSR_ISF_LG) /* Interrupt 64b mode valid on 630 */
#define MSR_HV __MASK(MSR_HV_LG) /* Hypervisor state */
#define MSR_VEC __MASK(MSR_VEC_LG) /* Enable AltiVec */
#define MSR_POW __MASK(MSR_POW_LG) /* Enable Power Management */
#define MSR_WE __MASK(MSR_WE_LG) /* Wait State Enable */
#define MSR_TGPR __MASK(MSR_TGPR_LG) /* TLB Update registers in use */
#define MSR_CE __MASK(MSR_CE_LG) /* Critical Interrupt Enable */
#define MSR_ILE __MASK(MSR_ILE_LG) /* Interrupt Little Endian */
#define MSR_EE __MASK(MSR_EE_LG) /* External Interrupt Enable */
#define MSR_PR __MASK(MSR_PR_LG) /* Problem State / Privilege Level */
#define MSR_FP __MASK(MSR_FP_LG) /* Floating Point enable */
#define MSR_ME __MASK(MSR_ME_LG) /* Machine Check Enable */
#define MSR_FE0 __MASK(MSR_FE0_LG) /* Floating Exception mode 0 */
#define MSR_SE __MASK(MSR_SE_LG) /* Single Step */
#define MSR_BE __MASK(MSR_BE_LG) /* Branch Trace */
#define MSR_DE __MASK(MSR_DE_LG) /* Debug Exception Enable */
#define MSR_FE1 __MASK(MSR_FE1_LG) /* Floating Exception mode 1 */
#define MSR_IP __MASK(MSR_IP_LG) /* Exception prefix 0x000/0xFFF */
#define MSR_IR __MASK(MSR_IR_LG) /* Instruction Relocate */
#define MSR_DR __MASK(MSR_DR_LG) /* Data Relocate */
#define MSR_PE __MASK(MSR_PE_LG) /* Protection Enable */
#define MSR_PX __MASK(MSR_PX_LG) /* Protection Exclusive Mode */
#define MSR_PMM __MASK(MSR_PMM_LG) /* Performance monitor */
#define MSR_RI __MASK(MSR_RI_LG) /* Recoverable Exception */
#define MSR_LE __MASK(MSR_LE_LG) /* Little Endian */

#define MSR_ MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF
#define MSR_KERNEL MSR_ | MSR_SF | MSR_HV

#define MSR_USER32 MSR_ | MSR_PR | MSR_EE
#define MSR_USER64 MSR_USER32 | MSR_SF
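
/*
 * Illustrative example (not part of the original header): the MSR masks
 * above are typically tested against a saved MSR image, e.g. to check
 * whether a trapped context was running in user (problem) state:
 *
 *	if (regs->msr & MSR_PR)
 *		... interrupt came from user space ...
 */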

/* Floating Point Status and Control Register (FPSCR) Fields */

#define FPSCR_FX 0x80000000 /* FPU exception summary */
#define FPSCR_FEX 0x40000000 /* FPU enabled exception summary */
#define FPSCR_VX 0x20000000 /* Invalid operation summary */
#define FPSCR_OX 0x10000000 /* Overflow exception summary */
#define FPSCR_UX 0x08000000 /* Underflow exception summary */
#define FPSCR_ZX 0x04000000 /* Zero-divide exception summary */
#define FPSCR_XX 0x02000000 /* Inexact exception summary */
#define FPSCR_VXSNAN 0x01000000 /* Invalid op for SNaN */
#define FPSCR_VXISI 0x00800000 /* Invalid op for Inv - Inv */
#define FPSCR_VXIDI 0x00400000 /* Invalid op for Inv / Inv */
#define FPSCR_VXZDZ 0x00200000 /* Invalid op for Zero / Zero */
#define FPSCR_VXIMZ 0x00100000 /* Invalid op for Inv * Zero */
#define FPSCR_VXVC 0x00080000 /* Invalid op for Compare */
#define FPSCR_FR 0x00040000 /* Fraction rounded */
#define FPSCR_FI 0x00020000 /* Fraction inexact */
#define FPSCR_FPRF 0x0001f000 /* FPU Result Flags */
#define FPSCR_FPCC 0x0000f000 /* FPU Condition Codes */
#define FPSCR_VXSOFT 0x00000400 /* Invalid op for software request */
#define FPSCR_VXSQRT 0x00000200 /* Invalid op for square root */
#define FPSCR_VXCVI 0x00000100 /* Invalid op for integer convert */
#define FPSCR_VE 0x00000080 /* Invalid op exception enable */
#define FPSCR_OE 0x00000040 /* IEEE overflow exception enable */
#define FPSCR_UE 0x00000020 /* IEEE underflow exception enable */
#define FPSCR_ZE 0x00000010 /* IEEE zero divide exception enable */
#define FPSCR_XE 0x00000008 /* FP inexact exception enable */
#define FPSCR_NI 0x00000004 /* FPU non IEEE-Mode */
#define FPSCR_RN 0x00000003 /* FPU rounding control */

/* Special Purpose Registers (SPRNs) */

#define SPRN_CTR 0x009 /* Count Register */
#define SPRN_DABR 0x3F5 /* Data Address Breakpoint Register */
#define DABR_TRANSLATION (1UL << 2)
#define SPRN_DAR 0x013 /* Data Address Register */
#define SPRN_DEC 0x016 /* Decrement Register */
#define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
#define DSISR_NOHPTE 0x40000000 /* no translation found */
#define DSISR_PROTFAULT 0x08000000 /* protection fault */
#define DSISR_ISSTORE 0x02000000 /* access was a store */
#define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
#define DSISR_NOSEGMENT 0x00200000 /* STAB/SLB miss */
#define SPRN_HID0 0x3F0 /* Hardware Implementation Register 0 */
#define SPRN_MSRDORM 0x3F1 /* Hardware Implementation Register 1 */
#define SPRN_HID1 0x3F1 /* Hardware Implementation Register 1 */
#define SPRN_IABR 0x3F2 /* Instruction Address Breakpoint Register */
#define SPRN_NIADORM 0x3F3 /* Hardware Implementation Register 2 */
#define SPRN_HID4 0x3F4 /* 970 HID4 */
#define SPRN_HID5 0x3F6 /* 970 HID5 */
#define SPRN_HID6 0x3F9 /* BE HID 6 */
#define HID6_LB (0x0F<<12) /* Concurrent Large Page Modes */
#define HID6_DLP (1<<20) /* Disable all large page modes (4K only) */
#define SPRN_TSCR 0x399 /* Thread switch control on BE */
#define SPRN_TTR 0x39A /* Thread switch timeout on BE */
#define TSCR_DEC_ENABLE 0x200000 /* Decrementer Interrupt */
#define TSCR_EE_ENABLE 0x100000 /* External Interrupt */
#define TSCR_EE_BOOST 0x080000 /* External Interrupt Boost */
#define SPRN_TSC 0x3FD /* Thread switch control on others */
#define SPRN_TST 0x3FC /* Thread switch timeout on others */
#define SPRN_L2CR 0x3F9 /* Level 2 Cache Control Register */
#define SPRN_LR 0x008 /* Link Register */
#define SPRN_PIR 0x3FF /* Processor Identification Register */
#define SPRN_PIT 0x3DB /* Programmable Interval Timer */
#define SPRN_PURR 0x135 /* Processor Utilization of Resources Register */
#define SPRN_PVR 0x11F /* Processor Version Register */
#define SPRN_RPA 0x3D6 /* Required Physical Address Register */
#define SPRN_SDA 0x3BF /* Sampled Data Address Register */
#define SPRN_SDR1 0x019 /* MMU Hash Base Register */
#define SPRN_SIA 0x3BB /* Sampled Instruction Address Register */
#define SPRN_SPRG0 0x110 /* Special Purpose Register General 0 */
#define SPRN_SPRG1 0x111 /* Special Purpose Register General 1 */
#define SPRN_SPRG2 0x112 /* Special Purpose Register General 2 */
#define SPRN_SPRG3 0x113 /* Special Purpose Register General 3 */
#define SPRN_SRR0 0x01A /* Save/Restore Register 0 */
#define SPRN_SRR1 0x01B /* Save/Restore Register 1 */
#define SPRN_TBRL 0x10C /* Time Base Read Lower Register (user, R/O) */
#define SPRN_TBRU 0x10D /* Time Base Read Upper Register (user, R/O) */
#define SPRN_TBWL 0x11C /* Time Base Write Lower Register (super, W/O) */
#define SPRN_TBWU 0x11D /* Time Base Write Upper Register (super, W/O) */
#define SPRN_HIOR 0x137 /* 970 Hypervisor interrupt offset */
#define SPRN_USIA 0x3AB /* User Sampled Instruction Address Register */
#define SPRN_XER 0x001 /* Fixed Point Exception Register */
#define SPRN_VRSAVE 0x100 /* Vector save */
#define SPRN_CTRLF 0x088
#define SPRN_CTRLT 0x098
#define CTRL_RUNLATCH 0x1

/* Performance monitor SPRs */
#define SPRN_SIAR 780
#define SPRN_SDAR 781
#define SPRN_MMCRA 786
#define MMCRA_SIHV 0x10000000UL /* state of MSR HV when SIAR set */
#define MMCRA_SIPR 0x08000000UL /* state of MSR PR when SIAR set */
#define MMCRA_SAMPLE_ENABLE 0x00000001UL /* enable sampling */
#define SPRN_PMC1 787
#define SPRN_PMC2 788
#define SPRN_PMC3 789
#define SPRN_PMC4 790
#define SPRN_PMC5 791
#define SPRN_PMC6 792
#define SPRN_PMC7 793
#define SPRN_PMC8 794
#define SPRN_MMCR0 795
#define MMCR0_FC 0x80000000UL /* freeze counters; set to 1 on a perfmon exception */
#define MMCR0_FCS 0x40000000UL /* freeze in supervisor state */
#define MMCR0_KERNEL_DISABLE MMCR0_FCS
#define MMCR0_FCP 0x20000000UL /* freeze in problem state */
#define MMCR0_PROBLEM_DISABLE MMCR0_FCP
#define MMCR0_FCM1 0x10000000UL /* freeze counters while MSR mark = 1 */
#define MMCR0_FCM0 0x08000000UL /* freeze counters while MSR mark = 0 */
#define MMCR0_PMXE 0x04000000UL /* performance monitor exception enable */
#define MMCR0_FCECE 0x02000000UL /* freeze counters on enabled condition or event */
#define MMCR0_TBEE 0x00400000UL /* time base exception enable */
#define MMCR0_PMC1CE 0x00008000UL /* PMC1 count enable */
#define MMCR0_PMCjCE 0x00004000UL /* PMCj count enable */
#define MMCR0_TRIGGER 0x00002000UL /* TRIGGER enable */
#define MMCR0_PMAO 0x00000080UL /* performance monitor alert has occurred; set to 0 after handling exception */
#define MMCR0_SHRFC 0x00000040UL /* share freeze conditions between threads */
#define MMCR0_FCTI 0x00000008UL /* freeze counters in tags inactive mode */
#define MMCR0_FCTA 0x00000004UL /* freeze counters in tags active mode */
#define MMCR0_FCWAIT 0x00000002UL /* freeze counter in WAIT state */
#define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */
#define SPRN_MMCR1 798

/* Short-hand versions for a number of the above SPRNs */

#define CTR SPRN_CTR /* Counter Register */
#define DAR SPRN_DAR /* Data Address Register */
#define DABR SPRN_DABR /* Data Address Breakpoint Register */
#define DEC SPRN_DEC /* Decrement Register */
#define DSISR SPRN_DSISR /* Data Storage Interrupt Status Register */
#define HID0 SPRN_HID0 /* Hardware Implementation Register 0 */
#define MSRDORM SPRN_MSRDORM /* MSR Dormant Register */
#define NIADORM SPRN_NIADORM /* NIA Dormant Register */
#define TSC SPRN_TSC /* Thread switch control */
#define TST SPRN_TST /* Thread switch timeout */
#define IABR SPRN_IABR /* Instruction Address Breakpoint Register */
#define L2CR SPRN_L2CR /* PPC 750 L2 control register */
#define __LR SPRN_LR
#define PVR SPRN_PVR /* Processor Version */
#define PIR SPRN_PIR /* Processor ID */
#define PURR SPRN_PURR /* Processor Utilization of Resource Register */
#define SDR1 SPRN_SDR1 /* MMU hash base register */
#define SPR0 SPRN_SPRG0 /* Supervisor Private Registers */
#define SPR1 SPRN_SPRG1
#define SPR2 SPRN_SPRG2
#define SPR3 SPRN_SPRG3
#define SPRG0 SPRN_SPRG0
#define SPRG1 SPRN_SPRG1
#define SPRG2 SPRN_SPRG2
#define SPRG3 SPRN_SPRG3
#define SRR0 SPRN_SRR0 /* Save and Restore Register 0 */
#define SRR1 SPRN_SRR1 /* Save and Restore Register 1 */
#define TBRL SPRN_TBRL /* Time Base Read Lower Register */
#define TBRU SPRN_TBRU /* Time Base Read Upper Register */
#define TBWL SPRN_TBWL /* Time Base Write Lower Register */
#define TBWU SPRN_TBWU /* Time Base Write Upper Register */
#define XER SPRN_XER

/* Processor Version Register (PVR) field extraction */

#define PVR_VER(pvr) (((pvr) >> 16) & 0xFFFF) /* Version field */
#define PVR_REV(pvr) (((pvr) >> 0) & 0xFFFF) /* Revision field */

/* Processor Version Numbers */
#define PV_NORTHSTAR 0x0033
#define PV_PULSAR 0x0034
#define PV_POWER4 0x0035
#define PV_ICESTAR 0x0036
#define PV_SSTAR 0x0037
#define PV_POWER4p 0x0038
#define PV_970 0x0039
#define PV_POWER5 0x003A
#define PV_POWER5p 0x003B
#define PV_970FX 0x003C
#define PV_630 0x0040
#define PV_630p 0x0041
#define PV_970MP 0x0044
#define PV_BE 0x0070
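
/*
 * Illustrative example (not part of the original header): PVR_VER()
 * picks out the processor family so it can be compared against the
 * PV_* values above, e.g.
 *
 *	unsigned long pvr = mfspr(SPRN_PVR);
 *	if (PVR_VER(pvr) == PV_970FX)
 *		... 970FX-specific setup ...
 *
 * (mfspr() is defined in the non-__ASSEMBLY__ part of this file.)
 */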

/* Platforms supported by PPC64 */
#define PLATFORM_PSERIES 0x0100
#define PLATFORM_PSERIES_LPAR 0x0101
#define PLATFORM_ISERIES_LPAR 0x0201
#define PLATFORM_LPAR 0x0001
#define PLATFORM_POWERMAC 0x0400
#define PLATFORM_MAPLE 0x0500
#define PLATFORM_BPA 0x1000

/* Compatibility with drivers coming from PPC32 world */
#define _machine (systemcfg->platform)
#define _MACH_Pmac PLATFORM_POWERMAC

/*
 * List of interrupt controllers.
 */
#define IC_INVALID 0
#define IC_OPEN_PIC 1
#define IC_PPC_XIC 2
#define IC_BPA_IIC 3

#define XGLUE(a,b) a##b
#define GLUE(a,b) XGLUE(a,b)

#ifdef __ASSEMBLY__

#define _GLOBAL(name) \
	.section ".text"; \
	.align 2 ; \
	.globl name; \
	.globl GLUE(.,name); \
	.section ".opd","aw"; \
name: \
	.quad GLUE(.,name); \
	.quad .TOC.@tocbase; \
	.quad 0; \
	.previous; \
	.type GLUE(.,name),@function; \
GLUE(.,name):

#define _KPROBE(name) \
	.section ".kprobes.text","a"; \
	.align 2 ; \
	.globl name; \
	.globl GLUE(.,name); \
	.section ".opd","aw"; \
name: \
	.quad GLUE(.,name); \
	.quad .TOC.@tocbase; \
	.quad 0; \
	.previous; \
	.type GLUE(.,name),@function; \
GLUE(.,name):

#define _STATIC(name) \
	.section ".text"; \
	.align 2 ; \
	.section ".opd","aw"; \
name: \
	.quad GLUE(.,name); \
	.quad .TOC.@tocbase; \
	.quad 0; \
	.previous; \
	.type GLUE(.,name),@function; \
GLUE(.,name):
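
/*
 * Illustrative example (not part of the original header): in a .S file,
 * _GLOBAL() emits the function descriptor in ".opd" plus the code entry
 * point, so a global assembly routine is written as e.g.
 *
 *	_GLOBAL(my_asm_helper)
 *		mflr	r0
 *		...
 *		blr
 *
 * where "my_asm_helper" is a made-up name used only for illustration.
 */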

#else /* __ASSEMBLY__ */

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
#define current_text_addr() ({ __label__ _l; _l: &&_l;})

/* Macros for setting and retrieving special purpose registers */

#define mfmsr() ({unsigned long rval; \
		asm volatile("mfmsr %0" : "=r" (rval)); rval;})

#define __mtmsrd(v, l) asm volatile("mtmsrd %0," __stringify(l) \
		: : "r" (v))
#define mtmsrd(v) __mtmsrd((v), 0)

#define mfspr(rn) ({unsigned long rval; \
		asm volatile("mfspr %0," __stringify(rn) \
			: "=r" (rval)); rval;})
#define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : : "r" (v))

#define mftb() ({unsigned long rval; \
		asm volatile("mftb %0" : "=r" (rval)); rval;})

#define mttbl(v) asm volatile("mttbl %0":: "r"(v))
#define mttbu(v) asm volatile("mttbu %0":: "r"(v))

#define mfasr() ({unsigned long rval; \
		asm volatile("mfasr %0" : "=r" (rval)); rval;})
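
/*
 * Illustrative example (not part of the original header): the accessors
 * above take the SPR number as a compile-time constant, e.g.
 *
 *	unsigned long pvr = mfspr(SPRN_PVR);
 *	mtspr(SPRN_SPRG3, some_value);
 *
 * where "some_value" is a placeholder used only for illustration.
 */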

/* Macros for adjusting thread priority (hardware multi-threading) */
#define HMT_very_low() asm volatile("or 31,31,31 # very low priority")
#define HMT_low() asm volatile("or 1,1,1 # low priority")
#define HMT_medium_low() asm volatile("or 6,6,6 # medium low priority")
#define HMT_medium() asm volatile("or 2,2,2 # medium priority")
#define HMT_medium_high() asm volatile("or 5,5,5 # medium high priority")
#define HMT_high() asm volatile("or 3,3,3 # high priority")
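
/*
 * set_tb() writes the time base low word as 0 first, so that a carry out
 * of TBL cannot ripple into TBU between the two writes that follow; the
 * real low word is written last.
 */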
static inline void set_tb(unsigned int upper, unsigned int lower)
{
	mttbl(0);
	mttbu(upper);
	mttbl(lower);
}

#define __get_SP() ({unsigned long sp; \
		asm volatile("mr %0,1": "=r" (sp)); sp;})

#ifdef __KERNEL__

extern int have_of;
extern u64 ppc64_interrupt_controller;

struct task_struct;
void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp);
void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy status */
extern void prepare_to_copy(struct task_struct *tsk);

/* Create a new kernel thread. */
extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

/* Lazy FPU handling on uni-processor */
extern struct task_struct *last_task_used_math;
extern struct task_struct *last_task_used_altivec;

/* 64-bit user address space is 44-bits (16TB user VM) */
#define TASK_SIZE_USER64 (0x0000100000000000UL)

/*
 * 32-bit user address space is 4GB - 1 page
 * (this 1 page is needed so referencing of 0xFFFFFFFF generates EFAULT)
 */
#define TASK_SIZE_USER32 (0x0000000100000000UL - (1*PAGE_SIZE))

#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
		TASK_SIZE_USER32 : TASK_SIZE_USER64)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4))
#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(TASK_SIZE_USER64 / 4))

#define TASK_UNMAPPED_BASE ((test_thread_flag(TIF_32BIT)||(ppcdebugset(PPCDBG_BINFMT_32ADDR))) ? \
		TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 )

typedef struct {
	unsigned long seg;
} mm_segment_t;

struct thread_struct {
	unsigned long ksp;		/* Kernel stack pointer */
	unsigned long ksp_vsid;
	struct pt_regs *regs;		/* Pointer to saved register state */
	mm_segment_t fs;		/* for get_fs() validation */
	double fpr[32];			/* Complete floating point set */
	unsigned long fpscr;		/* Floating point status (plus pad) */
	unsigned long fpexc_mode;	/* Floating-point exception mode */
	unsigned long start_tb;		/* Start purr when proc switched in */
	unsigned long accum_tb;		/* Total accumulated purr for process */
	unsigned long vdso_base;	/* base of the vDSO library */
	unsigned long dabr;		/* Data address breakpoint register */
#ifdef CONFIG_ALTIVEC
	/* Complete AltiVec register set */
	vector128 vr[32] __attribute((aligned(16)));
	/* AltiVec status */
	vector128 vscr __attribute((aligned(16)));
	unsigned long vrsave;
	int used_vr;			/* set if process has used altivec */
#endif /* CONFIG_ALTIVEC */
};

#define ARCH_MIN_TASKALIGN 16

#define INIT_SP (sizeof(init_stack) + (unsigned long) &init_stack)

#define INIT_THREAD { \
	.ksp = INIT_SP, \
	.regs = (struct pt_regs *)INIT_SP - 1, \
	.fs = KERNEL_DS, \
	.fpr = {0}, \
	.fpscr = 0, \
	.fpexc_mode = MSR_FE0|MSR_FE1, \
}

/*
 * Return saved PC of a blocked thread. For now, this is the "user" PC.
 */
#define thread_saved_pc(tsk) \
	((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)

unsigned long get_wchan(struct task_struct *p);

#define KSTK_EIP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
#define KSTK_ESP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->gpr[1]: 0)

/* Get/set floating-point exception mode */
#define GET_FPEXC_CTL(tsk, adr) get_fpexc_mode((tsk), (adr))
#define SET_FPEXC_CTL(tsk, val) set_fpexc_mode((tsk), (val))

extern int get_fpexc_mode(struct task_struct *tsk, unsigned long adr);
extern int set_fpexc_mode(struct task_struct *tsk, unsigned int val);

static inline unsigned int __unpack_fe01(unsigned long msr_bits)
{
	return ((msr_bits & MSR_FE0) >> 10) | ((msr_bits & MSR_FE1) >> 8);
}

static inline unsigned long __pack_fe01(unsigned int fpmode)
{
	return ((fpmode << 10) & MSR_FE0) | ((fpmode << 8) & MSR_FE1);
}
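
/*
 * MSR_FE0 is bit 11 (0x800) and MSR_FE1 is bit 8 (0x100), so the shifts
 * above map them onto bits 1 and 0 of the two-bit fpmode value, e.g.
 *
 *	__unpack_fe01(MSR_FE0 | MSR_FE1) == 3
 *	__pack_fe01(3) == (MSR_FE0 | MSR_FE1)
 */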

#define cpu_relax() do { HMT_low(); HMT_medium(); barrier(); } while (0)
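
/*
 * Illustrative example (not part of the original header): cpu_relax()
 * drops SMT priority while spinning and then restores it, e.g.
 *
 *	while (!test_bit(SOME_FLAG, &flags))
 *		cpu_relax();
 *
 * where SOME_FLAG and flags are placeholders used only for illustration.
 */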

/*
 * Prefetch macros.
 */
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

static inline void prefetch(const void *x)
{
	if (unlikely(!x))
		return;

	__asm__ __volatile__ ("dcbt 0,%0" : : "r" (x));
}

static inline void prefetchw(const void *x)
{
	if (unlikely(!x))
		return;

	__asm__ __volatile__ ("dcbtst 0,%0" : : "r" (x));
}

#define spin_lock_prefetch(x) prefetchw(x)

#define HAVE_ARCH_PICK_MMAP_LAYOUT

static inline void ppc64_runlatch_on(void)
{
	unsigned long ctrl;

	if (cpu_has_feature(CPU_FTR_CTRL)) {
		ctrl = mfspr(SPRN_CTRLF);
		ctrl |= CTRL_RUNLATCH;
		mtspr(SPRN_CTRLT, ctrl);
	}
}

static inline void ppc64_runlatch_off(void)
{
	unsigned long ctrl;

	if (cpu_has_feature(CPU_FTR_CTRL)) {
		ctrl = mfspr(SPRN_CTRLF);
		ctrl &= ~CTRL_RUNLATCH;
		mtspr(SPRN_CTRLT, ctrl);
	}
}
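
/*
 * Illustrative note (not part of the original header): the runlatch
 * tells the SMT hardware whether the thread is doing useful work.
 * Callers typically clear it in the idle loop and set it again before
 * returning to real work, e.g.
 *
 *	ppc64_runlatch_off();
 *	while (!need_resched())
 *		... idle ...
 *	ppc64_runlatch_on();
 */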

#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */

#ifdef __KERNEL__
#define RUNLATCH_ON(REG) \
BEGIN_FTR_SECTION \
	mfspr (REG),SPRN_CTRLF; \
	ori (REG),(REG),CTRL_RUNLATCH; \
	mtspr SPRN_CTRLT,(REG); \
END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
#endif

/*
 * Number of entries in the SLB. If this ever changes we should handle
 * it with a cpu feature fixup.
 */
#define SLB_NUM_ENTRIES 64

#endif /* __ASM_PPC64_PROCESSOR_H */