/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2006 by Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H
/*
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb() itself.
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 */

#define read_barrier_depends()		do { } while(0)
#define smp_read_barrier_depends()	do { } while(0)
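
/*
 * A minimal usage sketch (the names "foo", "gp" and "use" are made up
 * for illustration): a writer publishes an initialised structure
 * through a shared pointer, and a reader that dereferences that
 * pointer needs only the dependency barrier rather than a full rmb():
 *
 *	struct foo { int a; } *gp;
 *
 *	writer:				reader:
 *		p->a = 1;			q = gp;
 *		smp_wmb();			if (q) {
 *		gp = p;					smp_read_barrier_depends();
 *							use(q->a);
 *						}
 */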

#ifdef CONFIG_CPU_HAS_SYNC
#define __sync()				\
	__asm__ __volatile__(			\
		".set	push\n\t"		\
		".set	noreorder\n\t"		\
		".set	mips2\n\t"		\
		"sync\n\t"			\
		".set	pop"			\
		: /* no output */		\
		: /* no input */		\
		: "memory")
#else
#define __sync()	do { } while(0)
#endif
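
/*
 * __fast_iob() pushes pending writes out towards the bus by doing a
 * dummy uncached load from CKSEG1: the load cannot complete until the
 * earlier writes have left the CPU.
 */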
#define __fast_iob()				\
	__asm__ __volatile__(			\
		".set	push\n\t"		\
		".set	noreorder\n\t"		\
		"lw	$0,%0\n\t"		\
		"nop\n\t"			\
		".set	pop"			\
		: /* no output */		\
		: "m" (*(int *)CKSEG1)		\
		: "memory")

#define fast_wmb()	__sync()
#define fast_rmb()	__sync()
#define fast_mb()	__sync()
#define fast_iob()				\
	do {					\
		__sync();			\
		__fast_iob();			\
	} while (0)
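
/*
 * On CPUs with an external write buffer (CONFIG_CPU_HAS_WB) a full
 * barrier and iob() must also drain that buffer, which is what
 * wbflush() from <asm/wbflush.h> does; everything else can use the
 * sync-based fast_*() variants above.
 */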
#ifdef CONFIG_CPU_HAS_WB

#include <asm/wbflush.h>

#define wmb()		fast_wmb()
#define rmb()		fast_rmb()
#define mb()		wbflush()
#define iob()		wbflush()

#else /* !CONFIG_CPU_HAS_WB */

#define wmb()		fast_wmb()
#define rmb()		fast_rmb()
#define mb()		fast_mb()
#define iob()		fast_iob()

#endif /* !CONFIG_CPU_HAS_WB */

#if defined(CONFIG_WEAK_ORDERING) && defined(CONFIG_SMP)
#define __WEAK_ORDERING_MB	"	sync	\n"
#else
#define __WEAK_ORDERING_MB	"		\n"
#endif
#if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
#define __WEAK_LLSC_MB		"	sync	\n"
#else
#define __WEAK_LLSC_MB		"		\n"
#endif
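
/*
 * The smp_*() barriers below expand to a hardware "sync" only when the
 * kernel is built for a weakly ordered SMP system; otherwise the empty
 * string leaves just a compiler barrier (the "memory" clobber).
 */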
#define smp_mb()	__asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
#define smp_rmb()	__asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
#define smp_wmb()	__asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")

#define set_mb(var, value) \
	do { var = value; smp_mb(); } while (0)
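
/*
 * Typical use of set_mb() (an illustrative sketch only): assign a
 * value and follow it with a full SMP barrier so the store is ordered
 * before any later memory accesses on this CPU, e.g.
 *
 *	set_mb(current->state, TASK_INTERRUPTIBLE);
 *	if (signal_pending(current))
 *		...
 */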

#define smp_llsc_mb()	__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
#define smp_llsc_rmb()	__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
#define smp_llsc_wmb()	__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
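
/*
 * The llsc variants are meant to follow ll/sc sequences in the atomic
 * and locking code.  An illustrative sketch (not taken from that code):
 *
 *	1:	ll	%0, %1		# load-linked
 *		addu	%0, 1
 *		sc	%0, %1		# store-conditional
 *		beqz	%0, 1b		# retry if the store failed
 *	...
 *	smp_llsc_mb();			# order the update against later accesses
 */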

#endif /* __ASM_BARRIER_H */