include/asm-blackfin/system.h
/*
 * File:         include/asm/system.h
 * Based on:
 * Author:       Tony Kou (tonyko@lineo.ca)
 *               Copyright (c) 2002 Arcturus Networks Inc.
 *               (www.arcturusnetworks.com)
 *               Copyright (c) 2003 Metrowerks (www.metrowerks.com)
 *               Copyright (c) 2004 Analog Device Inc.
 * Created:      25Jan2001 - Tony Kou
 * Description:  system.h include file
 *
 * Modified:     22Sep2006 - Robin Getz
 *               - move include blackfin.h down, so I can get access to
 *                 irq functions in other include files.
 *
 * Bugs:         Enter bugs at http://blackfin.uclinux.org/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.
 * If not, write to the Free Software Foundation,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#ifndef _BLACKFIN_SYSTEM_H
#define _BLACKFIN_SYSTEM_H

#include <linux/linkage.h>
#include <linux/compiler.h>
#include <asm/mach/anomaly.h>
/*
 * Interrupt configuring macros.
 */

extern unsigned long irq_flags;
#define local_irq_enable() \
	__asm__ __volatile__( \
		"sti %0;" \
		: \
		: "d" (irq_flags) \
	)

#define local_irq_disable() \
	do { \
		int __tmp_dummy; \
		__asm__ __volatile__( \
			"cli %0;" \
			: "=d" (__tmp_dummy) \
		); \
	} while (0)
#if ANOMALY_05000244 && defined(CONFIG_BFIN_ICACHE)
# define NOP_PAD_ANOMALY_05000244 "nop; nop;"
#else
# define NOP_PAD_ANOMALY_05000244
#endif
#define idle_with_irq_disabled() \
	__asm__ __volatile__( \
		NOP_PAD_ANOMALY_05000244 \
		".align 8;" \
		"sti %0;" \
		"idle;" \
		: \
		: "d" (irq_flags) \
	)
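/*
 * Usage sketch (illustrative only): idle_with_irq_disabled() re-enables
 * interrupts with "sti" and immediately executes "idle"; the NOP padding and
 * alignment above work around anomaly 05000244 when the instruction cache is
 * enabled.  A caller along the lines of the idle loop is assumed to have
 * interrupts disabled already:
 *
 *	local_irq_disable();
 *	if (!need_resched())
 *		idle_with_irq_disabled();	// sleeps until an interrupt
 *	else
 *		local_irq_enable();
 */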
#ifdef CONFIG_DEBUG_HWERR
# define __save_and_cli(x) \
	__asm__ __volatile__( \
		"cli %0;" \
		"sti %1;" \
		: "=&d" (x) \
		: "d" (0x3F) \
	)
#else
# define __save_and_cli(x) \
	__asm__ __volatile__( \
		"cli %0;" \
		: "=&d" (x) \
	)
#endif
#define local_save_flags(x) \
	__asm__ __volatile__( \
		"cli %0;" \
		"sti %0;" \
		: "=d" (x) \
	)
#ifdef CONFIG_DEBUG_HWERR
#define irqs_enabled_from_flags(x) (((x) & ~0x3f) != 0)
#else
#define irqs_enabled_from_flags(x) ((x) != 0x1f)
#endif

#define local_irq_restore(x) \
	do { \
		if (irqs_enabled_from_flags(x)) \
			local_irq_enable(); \
	} while (0)
/* For spinlocks etc */
#define local_irq_save(x) __save_and_cli(x)

#define irqs_disabled() \
({ \
	unsigned long flags; \
	local_save_flags(flags); \
	!irqs_enabled_from_flags(flags); \
})
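/*
 * Usage sketch (illustrative only; the data below is hypothetical): the
 * flags-based pairs are the interface the rest of the kernel uses, with the
 * saved IMASK value carried in the caller's flags word:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		// cli: mask interrupts, remember IMASK
 *	// ... touch data shared with an interrupt handler ...
 *	local_irq_restore(flags);	// re-enable (via the global irq_flags
 *					// mask) only if they were enabled before
 */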
/*
 * Force strict CPU ordering.
 */
#define nop()	asm volatile ("nop;\n\t"::)
#define mb()	asm volatile ("" : : :"memory")
#define rmb()	asm volatile ("" : : :"memory")
#define wmb()	asm volatile ("" : : :"memory")
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)

#define read_barrier_depends()		do { } while(0)
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#endif
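/*
 * Note: with a single core the mb()/rmb()/wmb() macros only have to stop the
 * compiler from reordering (hence the empty asm with a "memory" clobber), and
 * the smp_*() variants collapse to barrier() in the !CONFIG_SMP case.  A
 * minimal sketch of the usual publish pattern, with hypothetical variables:
 *
 *	shared_data = value;
 *	wmb();			// keep the data store before the flag store
 *	data_ready = 1;
 */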
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

struct __xchg_dummy {
	unsigned long a[100];
};
#define __xg(x) ((volatile struct __xchg_dummy *)(x))
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
				   int size)
{
	unsigned long tmp = 0;
	unsigned long flags = 0;

	local_irq_save(flags);

	switch (size) {
	case 1:
		__asm__ __volatile__
			("%0 = b%2 (z);\n\t"
			 "b%2 = %1;\n\t"
			 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 2:
		__asm__ __volatile__
			("%0 = w%2 (z);\n\t"
			 "w%2 = %1;\n\t"
			 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 4:
		__asm__ __volatile__
			("%0 = %2;\n\t"
			 "%2 = %1;\n\t"
			 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	}

	local_irq_restore(flags);
	return tmp;
}
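/*
 * Usage sketch (illustrative only): xchg() swaps a new value into *ptr and
 * returns the previous one; atomicity comes from disabling interrupts around
 * the load/store pair rather than from a dedicated hardware primitive.
 *
 *	static unsigned long pending_events;	// hypothetical
 *	...
 *	unsigned long events = xchg(&pending_events, 0);	// fetch and clear
 */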
#include <asm-generic/cmpxchg-local.h>

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n) \
	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#ifndef CONFIG_SMP
#include <asm-generic/cmpxchg.h>
#endif
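/*
 * Usage sketch (illustrative only): there is no hardware compare-and-swap
 * here, so cmpxchg_local()/cmpxchg64_local() fall back to the generic
 * interrupt-disabling helpers, and the full cmpxchg() is only pulled in from
 * asm-generic/cmpxchg.h in the !CONFIG_SMP case.
 *
 *	static unsigned long owner;		// hypothetical
 *	...
 *	if (cmpxchg_local(&owner, 0, my_id) == 0)
 *		;	// this CPU claimed ownership
 */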
#define prepare_to_switch()	do { } while(0)

/*
 * switch_to(n) should switch tasks to task ptr, first checking that
 * ptr isn't the current task, in which case it does nothing.
 */

#include <asm/blackfin.h>

asmlinkage struct task_struct *resume(struct task_struct *prev, struct task_struct *next);

#define switch_to(prev,next,last) \
	do { \
		memcpy (&task_thread_info(prev)->l1_task_info, L1_SCRATCH_TASK_INFO, \
			sizeof *L1_SCRATCH_TASK_INFO); \
		memcpy (L1_SCRATCH_TASK_INFO, &task_thread_info(next)->l1_task_info, \
			sizeof *L1_SCRATCH_TASK_INFO); \
		(last) = resume (prev, next); \
	} while (0)
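/*
 * Note: resume() is the low-level assembly context switch; the memcpy()
 * calls around it copy the outgoing task's view of the L1 scratchpad task
 * info back into its thread_info and install the incoming task's copy,
 * since L1_SCRATCH_TASK_INFO names a single fixed region of on-chip L1
 * scratchpad memory rather than per-task storage.
 */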
#endif	/* _BLACKFIN_SYSTEM_H */